From baf1df92eed1571b8c2cc3d891f1916ff8dd0629 Mon Sep 17 00:00:00 2001 From: Ashley Date: Mon, 23 Sep 2019 01:52:01 +1200 Subject: [PATCH 001/359] Migrated code to 2018 edition, updated dependencies (#221) --- kvdb-rocksdb/Cargo.toml | 9 +++++---- kvdb-rocksdb/src/lib.rs | 31 ++++++++----------------------- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 57e7e6a2b..e18128662 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,10 +1,11 @@ [package] name = "kvdb-rocksdb" -version = "0.1.4" +version = "0.1.5" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by rocksDB" license = "GPL-3.0" +edition = "2018" [dependencies] elastic-array = "0.10" @@ -12,9 +13,9 @@ fs-swap = "0.2.4" interleaved-ordered = "0.1.0" kvdb = { version = "0.1", path = "../kvdb" } log = "0.4" -num_cpus = "1.0" -parking_lot = "0.6" -regex = "1.0" +num_cpus = "1.10" +parking_lot = "0.9" +regex = "1.3" parity-rocksdb = "0.5" [dev-dependencies] diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index b5ecf3059..d5ab23829 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -14,26 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-#[macro_use] -extern crate log; - -extern crate elastic_array; -extern crate fs_swap; -extern crate interleaved_ordered; -extern crate num_cpus; -extern crate parking_lot; -extern crate regex; -extern crate parity_rocksdb; - -#[cfg(test)] -extern crate ethereum_types; - -extern crate kvdb; - -use std::collections::HashMap; -use std::marker::PhantomData; -use std::{cmp, fs, io, mem, result, error}; -use std::path::Path; +use std::{ + cmp, fs, io, mem, result, error, + collections::HashMap, marker::PhantomData, path::Path +}; use parking_lot::{Mutex, MutexGuard, RwLock}; use parity_rocksdb::{ @@ -42,6 +26,7 @@ use parity_rocksdb::{ }; use interleaved_ordered::{interleave_ordered, InterleaveOrdered}; +use log::{debug, warn}; use elastic_array::ElasticArray32; use fs_swap::{swap, swap_nonatomic}; use kvdb::{KeyValueDB, DBTransaction, DBValue, DBOp}; @@ -55,7 +40,7 @@ use std::fs::File; #[cfg(target_os = "linux")] use std::path::PathBuf; -fn other_io_err(e: E) -> io::Error where E: Into> { +fn other_io_err(e: E) -> io::Error where E: Into> { io::Error::new(io::ErrorKind::Other, e) } @@ -692,13 +677,13 @@ impl KeyValueDB for Database { Database::flush(self) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { let unboxed = Database::iter(self, col); Box::new(unboxed.into_iter().flat_map(|inner| inner)) } fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> + -> Box, Box<[u8]>)> + 'a> { let unboxed = Database::iter_from_prefix(self, col, prefix); Box::new(unboxed.into_iter().flat_map(|inner| inner)) From 085d18b6c54728d53585b1a71372856dbf225fe3 Mon Sep 17 00:00:00 2001 From: Ashley Date: Mon, 23 Sep 2019 19:34:17 +1200 Subject: [PATCH 002/359] [kvdb-memorydb] Migrated code to 2018 edition, updated parking_lot (#222) * Migrated code to 2018 edition, updated parking_lot * revert parking_lot update due to breakages --- kvdb-memorydb/Cargo.toml | 3 ++- 
kvdb-memorydb/src/lib.rs | 10 +++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 301e562bf..d1dedb37b 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,10 +1,11 @@ [package] name = "kvdb-memorydb" -version = "0.1.0" +version = "0.1.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" license = "GPL-3.0" +edition = "2018" [dependencies] parking_lot = "0.6" diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 7a4590632..2762b6081 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -14,11 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -extern crate parking_lot; -extern crate kvdb; - -use std::collections::{BTreeMap, HashMap}; -use std::io; +use std::{io, collections::{BTreeMap, HashMap}}; use parking_lot::RwLock; use kvdb::{DBValue, DBTransaction, KeyValueDB, DBOp}; @@ -87,7 +83,7 @@ impl KeyValueDB for InMemory { Ok(()) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( // TODO: worth optimizing at all? map.clone() @@ -99,7 +95,7 @@ impl KeyValueDB for InMemory { } fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> + -> Box, Box<[u8]>)> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( From 31b6479d51f3db44c6f344e146f3972a988024fe Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 25 Sep 2019 15:17:16 +0200 Subject: [PATCH 003/359] [kvdb-web] indexeddb implementation (#202) * [kvdb-web] indexeddb implementation * Downgrade futures-preview We are forcing version alpha.17 in Substrate because of a compilation error with alpha.18. 
Co-Authored-By: Pierre Krieger * [.travis.yml] disable headless tests on macOS * [kvdb-web] fix compiler warning * [kvdb-web] add a comment about sync unsafety * [.travis.yml] fix headless tests * [kvdb-web] put indexed_db into Mutex * [kvdb-web] try creating only new columns * [kvdb-web] remove invalid comment * [kvdb-web] expose version * [kvdb-web] convert some expect_throws to expects * [kvdb-web] introduce the error module * [kvdb-web] better error handling * [kvdb-web] remove unused error * [kvdb-web] add license header to tests * [kvdb-web] fix license copy-paste * [kvdb-web] implement automatic version bump hack * [kvdb-web] add docs to Database * [kvdb-web] mention reading the whole db in memory * [kvdb-web] document why we reopen the db * [kvdb-web] add a warning on transaction failure * [kvdb-web] fix typo in docs * grammar pass by @dvdplm --- .travis.yml | 5 + Cargo.toml | 1 + kvdb-web/Cargo.toml | 43 ++++++ kvdb-web/src/error.rs | 59 ++++++++ kvdb-web/src/indexed_db.rs | 256 +++++++++++++++++++++++++++++++++++ kvdb-web/src/lib.rs | 174 ++++++++++++++++++++++++ kvdb-web/tests/indexed_db.rs | 66 +++++++++ 7 files changed, 604 insertions(+) create mode 100644 kvdb-web/Cargo.toml create mode 100644 kvdb-web/src/error.rs create mode 100644 kvdb-web/src/indexed_db.rs create mode 100644 kvdb-web/src/lib.rs create mode 100644 kvdb-web/tests/indexed_db.rs diff --git a/.travis.yml b/.travis.yml index b38c5ff5c..cd1563c4b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,6 +22,8 @@ matrix: rust: stable allow_failures: - rust: nightly +install: + - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh script: - cargo check --all --tests - cargo build --all @@ -40,3 +42,6 @@ script: - cd parity-util-mem/ && cargo test --features=mimalloc-global && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. 
+ - if [ "$TRAVIS_OS_NAME" == "linux" ]; then + cd kvdb-web/ && wasm-pack test --headless --chrome --firefox && cd ..; + fi diff --git a/Cargo.toml b/Cargo.toml index c4dc4ea9e..6c12c3205 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "kvdb", "kvdb-memorydb", "kvdb-rocksdb", + "kvdb-web", "parity-bytes", "parity-crypto", "parity-path", diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml new file mode 100644 index 000000000..27ed6ecd0 --- /dev/null +++ b/kvdb-web/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "kvdb-web" +version = "0.1.0" +authors = ["Parity Technologies "] +repository = "https://github.com/paritytech/parity-common" +description = "A key-value database for use in browsers" +documentation = "https://docs.rs/kvdb-web/" +license = "GPL-3.0" +edition = "2018" + +[dependencies] +wasm-bindgen = "0.2.49" +js-sys = "0.3.26" +kvdb = { version = "0.1", path = "../kvdb" } +kvdb-memorydb = { version = "0.1", path = "../kvdb-memorydb" } +futures-preview = "0.3.0-alpha.17" +log = "0.4.8" +send_wrapper = "0.2.0" + +[dependencies.web-sys] +version = "0.3.26" +features = [ + 'console', + 'Window', + 'IdbFactory', + 'IdbDatabase', + 'IdbTransaction', + 'IdbTransactionMode', + 'IdbOpenDbRequest', + 'IdbRequest', + 'IdbObjectStore', + 'Event', + 'EventTarget', + 'IdbCursor', + 'IdbCursorWithValue', + 'DomStringList', +] + +[dev-dependencies] +wasm-bindgen-test = "0.2.49" +futures-preview = { version = "0.3.0-alpha.18", features = ['compat'] } +futures01 = { package = "futures", version = "0.1" } +console_log = "0.1.2" diff --git a/kvdb-web/src/error.rs b/kvdb-web/src/error.rs new file mode 100644 index 000000000..64361830d --- /dev/null +++ b/kvdb-web/src/error.rs @@ -0,0 +1,59 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Errors that can occur when working with IndexedDB. + +use std::fmt; + + +/// An error that occurred when working with IndexedDB. +#[derive(Clone, PartialEq, Debug)] +pub enum Error { + /// Accessing a Window has failed. + /// Are we in a WebWorker? + WindowNotAvailable, + /// IndexedDB is not supported by your browser. + NotSupported(String), + /// This enum may grow additional variants, + /// so this makes sure clients don't count on exhaustive matching. + /// (Otherwise, adding a new variant could break existing code.) 
+ #[doc(hidden)] + __Nonexhaustive, +} + +impl std::error::Error for Error { + fn description(&self) -> &str { + match *self { + Error::WindowNotAvailable => "Accessing a Window has failed", + Error::NotSupported(_) => "IndexedDB is not supported by your browser", + Error::__Nonexhaustive => unreachable!(), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::WindowNotAvailable => write!(f, "Accessing a Window has failed"), + Error::NotSupported(ref err) => write!( + f, + "IndexedDB is not supported by your browser: {}", + err, + ), + Error::__Nonexhaustive => unreachable!(), + } + } +} diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs new file mode 100644 index 000000000..a75f940a6 --- /dev/null +++ b/kvdb-web/src/indexed_db.rs @@ -0,0 +1,256 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Utility functions to interact with IndexedDB browser API. 
+ +use wasm_bindgen::{JsCast, JsValue, closure::Closure}; +use web_sys::{ + IdbDatabase, IdbRequest, IdbOpenDbRequest, + Event, IdbCursorWithValue, + IdbTransactionMode, +}; +use js_sys::{Array, Uint8Array, ArrayBuffer}; + +use futures::channel; +use futures::prelude::*; + +use kvdb::{DBOp, DBTransaction}; + +use std::ops::Deref; +use log::{debug, warn}; + + +use crate::{Column, error::Error}; + +pub struct IndexedDB { + pub version: u32, + pub columns: u32, + pub inner: super::SendWrapper, +} + +/// Opens the IndexedDB with the given name, version and the specified number of columns +/// (including the default one). +pub fn open(name: &str, version: Option, columns: u32) -> impl Future> { + let (tx, rx) = channel::oneshot::channel::(); + + let window = match web_sys::window() { + Some(window) => window, + None => return future::Either::Right(future::err(Error::WindowNotAvailable)), + }; + let idb_factory = window.indexed_db(); + + let idb_factory = match idb_factory { + Ok(idb_factory) => idb_factory.expect("We can't get a null pointer back; qed"), + Err(err) => return future::Either::Right(future::err(Error::NotSupported(format!("{:?}", err)))), + }; + + let open_request = match version { + Some(version) => idb_factory.open_with_u32(name, version) + .expect("TypeError is not possible with Rust; qed"), + None => idb_factory.open(name).expect("TypeError is not possible with Rust; qed"), + }; + + try_create_missing_stores(&open_request, columns, version); + + let on_success = Closure::once(move |event: &Event| { + // Extract database handle from the event + let target = event.target().expect("Event should have a target; qed"); + let req = target.dyn_ref::().expect("Event target is IdbRequest; qed"); + + let result = req + .result() + .expect("IndexedDB.onsuccess should have a valid result; qed"); + assert!(result.is_instance_of::()); + + let db = IdbDatabase::from(result); + // JS returns version as f64 + let version = db.version().round() as u32; + let columns = 
db.object_store_names().length(); + + // errors if the receiving end was dropped before this call + let _ = tx.send(IndexedDB { + version, + columns, + inner: super::SendWrapper::new(db), + }); + }); + open_request.set_onsuccess(Some(on_success.as_ref().unchecked_ref())); + on_success.forget(); + + future::Either::Left( + rx.then(|r| future::ok(r.expect("Sender isn't dropped; qed"))) + ) +} + +fn store_name(num: u32) -> String { + format!("col{}", num) +} + +fn column_to_number(column: Column) -> u32 { + column.map(|c| c + 1).unwrap_or_default() +} + + +// Returns js objects representing store names for each column +fn store_names_js(columns: u32) -> Array { + let column_names = (0..=columns).map(store_name); + + let js_array = Array::new(); + for name in column_names { + js_array.push(&JsValue::from(name)); + } + + js_array +} + +fn try_create_missing_stores(req: &IdbOpenDbRequest, columns: u32, version: Option) { + let on_upgradeneeded = Closure::once(move |event: &Event| { + debug!("Upgrading or creating the database to version {:?}, columns {}", version, columns); + // Extract database handle from the event + let target = event.target().expect("Event should have a target; qed"); + let req = target.dyn_ref::().expect("Event target is IdbRequest; qed"); + let result = req.result().expect("IdbRequest should have a result; qed"); + let db: &IdbDatabase = result.unchecked_ref(); + + let previous_columns = db.object_store_names().length(); + debug!("Previous version: {}, columns {}", db.version(), previous_columns); + + for name in (previous_columns..=columns).map(store_name) { + let res = db.create_object_store(name.as_str()); + if let Err(err) = res { + debug!("error creating object store {}: {:?}", name, err); + } + } + }); + + req.set_onupgradeneeded(Some(on_upgradeneeded.as_ref().unchecked_ref())); + on_upgradeneeded.forget(); +} + +/// Commit a transaction to the IndexedDB. 
+pub fn idb_commit_transaction( + idb: &IdbDatabase, + txn: &DBTransaction, + columns: u32, +) -> impl Future { + let store_names_js = store_names_js(columns); + + // Create a transaction + let mode = IdbTransactionMode::Readwrite; + let idb_txn = idb.transaction_with_str_sequence_and_mode(&store_names_js, mode) + .expect("The provided mode and store names are valid; qed"); + + // Open object stores (columns) + let object_stores = (0..=columns).map(|n| { + idb_txn.object_store(store_name(n).as_str()) + .expect("Object stores were created in try_create_object_stores; qed") + }).collect::>(); + + for op in &txn.ops { + match op { + DBOp::Insert { col, key, value } => { + let column = column_to_number(*col) as usize; + + // Convert rust bytes to js arrays + let key_js = Uint8Array::from(key.as_ref()); + let val_js = Uint8Array::from(value.as_ref()); + + // Insert key/value pair into the object store + let res = object_stores[column].put_with_key(val_js.as_ref(), key_js.as_ref()); + if let Err(err) = res { + warn!("error inserting key/values into col_{}: {:?}", column, err); + } + }, + DBOp::Delete { col, key } => { + let column = column_to_number(*col) as usize; + + // Convert rust bytes to js arrays + let key_js = Uint8Array::from(key.as_ref()); + + // Delete key/value pair from the object store + let res = object_stores[column].delete(key_js.as_ref()); + if let Err(err) = res { + warn!("error deleting key from col_{}: {:?}", column, err); + } + }, + } + } + + let (tx, rx) = channel::oneshot::channel::<()>(); + + let on_complete = Closure::once(move || { + let _ = tx.send(()); + }); + idb_txn.set_oncomplete(Some(on_complete.as_ref().unchecked_ref())); + on_complete.forget(); + + let on_error = Closure::once(move || { + warn!("Failed to commit a transaction to IndexedDB"); + }); + idb_txn.set_onerror(Some(on_error.as_ref().unchecked_ref())); + on_error.forget(); + + rx.map(|_| ()) +} + + +/// Returns a cursor to a database column with the given column number. 
+pub fn idb_cursor(idb: &IdbDatabase, col: u32) -> impl Stream, Vec)> { + // TODO: we could read all the columns in one db transaction + let store_name = store_name(col); + let store_name = store_name.as_str(); + let txn = idb.transaction_with_str(store_name) + .expect("The stores were created on open: {}; qed"); + + let store = txn.object_store(store_name).expect("Opening a store shouldn't fail; qed"); + let cursor = store.open_cursor().expect("Opening a cursor shoudn't fail; qed"); + + let (tx, rx) = channel::mpsc::unbounded(); + + let on_cursor = Closure::wrap(Box::new(move |event: &Event| { + // Extract the cursor from the event + let target = event.target().expect("on_cursor should have a target; qed"); + let req = target.dyn_ref::().expect("target should be IdbRequest; qed"); + let result = req.result().expect("IdbRequest should have a result; qed"); + let cursor: &IdbCursorWithValue = result.unchecked_ref(); + + if let (Ok(key), Ok(value)) = (cursor.deref().key(), cursor.value()) { + let k: &ArrayBuffer = key.unchecked_ref(); + let v: &Uint8Array = value.unchecked_ref(); + + // Copy js arrays into rust `Vec`s + let mut kv = vec![0u8; k.byte_length() as usize]; + let mut vv = vec![0u8; v.byte_length() as usize]; + Uint8Array::new(k).copy_to(&mut kv[..]); + v.copy_to(&mut vv[..]); + + if let Err(e) = tx.unbounded_send((kv, vv)) { + warn!("on_cursor: error sending to a channel {:?}", e); + } + if let Err(e) = cursor.deref().continue_() { + warn!("cursor advancement has failed {:?}", e); + } + } else { + // we're done + tx.close_channel(); + } + }) as Box); + + cursor.set_onsuccess(Some(on_cursor.as_ref().unchecked_ref())); + on_cursor.forget(); + + rx +} diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs new file mode 100644 index 000000000..f966f69ce --- /dev/null +++ b/kvdb-web/src/lib.rs @@ -0,0 +1,174 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! A key-value database for use in browsers +//! +//! Writes data both into memory and IndexedDB, reads the whole database in memory +//! from the IndexedDB on `open`. + +#![deny(missing_docs)] + +mod error; +mod indexed_db; + +use std::io; +use std::rc::Rc; +use std::sync::Mutex; +use kvdb::{DBValue, DBTransaction}; +use kvdb_memorydb::{InMemory, self as in_memory}; +use send_wrapper::SendWrapper; + +pub use error::Error; +pub use kvdb::KeyValueDB; + +use futures::prelude::*; + +use web_sys::IdbDatabase; + +/// Database backed by both IndexedDB and in memory implementation. +pub struct Database { + name: String, + version: u32, + columns: u32, + in_memory: InMemory, + indexed_db: Mutex>, +} + +// The default column is represented as `None`. +type Column = Option; + +fn number_to_column(col: u32) -> Column { + col.checked_sub(1) +} + + +impl Database { + /// Opens the database with the given name, + /// and the specified number of columns (not including the default one). 
+ pub fn open(name: String, columns: u32) -> impl Future> { + // let's try to open the latest version of the db first + let open_request = indexed_db::open(name.as_str(), None, columns); + let name_clone = name.clone(); + open_request.then(move |db| { + let db = match db { + Ok(db) => db, + Err(err) => return future::Either::Right(future::err(err)), + }; + + // If we need more column than the latest version has, + // then bump the version (+ 1 for the default column). + // In order to bump the version, we close the database + // and reopen it with a higher version than it was opened with previously. + // cf. https://github.com/paritytech/parity-common/pull/202#discussion_r321221751 + if columns + 1 > db.columns { + let next_version = db.version + 1; + drop(db); + future::Either::Left(indexed_db::open(name.as_str(), Some(next_version), columns).boxed()) + } else { + future::Either::Left(future::ok(db).boxed()) + } + // populate the in_memory db from the IndexedDB + }).then(move |db| { + let db = match db { + Ok(db) => db, + Err(err) => return future::Either::Right(future::err(err)), + }; + + let indexed_db::IndexedDB { version, inner, .. 
} = db; + let rc = Rc::new(inner.take()); + let weak = Rc::downgrade(&rc); + // read the columns from the IndexedDB + future::Either::Left(stream::iter(0..=columns).map(move |n| { + let db = weak.upgrade().expect("rc should live at least as long; qed"); + indexed_db::idb_cursor(&db, n).fold(DBTransaction::new(), move |mut txn, (key, value)| { + let column = number_to_column(n); + txn.put_vec(column, key.as_ref(), value); + future::ready(txn) + }) + // write each column into memory + }).fold(in_memory::create(columns), |m, txn| { + txn.then(|txn| { + m.write_buffered(txn); + future::ready(m) + }) + }).then(move |in_memory| future::ok(Database { + name: name_clone, + version, + columns, + in_memory, + indexed_db: Mutex::new(SendWrapper::new( + Rc::try_unwrap(rc).expect("should have only 1 ref at this point; qed") + )), + }))) + }) + } + + /// Get the database name. + pub fn name(&self) -> &str { + self.name.as_str() + } + + /// Get the database version. + pub fn version(&self) -> u32 { + self.version + } +} + +impl Drop for Database { + fn drop(&mut self) { + if let Ok(db) = self.indexed_db.lock() { + db.close(); + } + } +} + +impl KeyValueDB for Database { + fn get(&self, col: Option, key: &[u8]) -> io::Result> { + self.in_memory.get(col, key) + } + + fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + self.in_memory.get_by_prefix(col, prefix) + } + + fn write_buffered(&self, transaction: DBTransaction) { + if let Ok(guard) = self.indexed_db.lock() { + let _ = indexed_db::idb_commit_transaction(&*guard, &transaction, self.columns); + } + self.in_memory.write_buffered(transaction); + } + + fn flush(&self) -> io::Result<()> { + Ok(()) + } + + // NOTE: clones the whole db + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + self.in_memory.iter(col) + } + + // NOTE: clones the whole db + fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) + -> Box, Box<[u8]>)> + 'a> + { + self.in_memory.iter_from_prefix(col, prefix) + } + + // 
NOTE: not supported + fn restore(&self, _new_db: &str) -> std::io::Result<()> { + Err(io::Error::new(io::ErrorKind::Other, "Not supported yet")) + } +} diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs new file mode 100644 index 000000000..1286faf66 --- /dev/null +++ b/kvdb-web/tests/indexed_db.rs @@ -0,0 +1,66 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! IndexedDB tests. 
+ +use futures::compat; +use futures::future::{self, FutureExt as _, TryFutureExt as _}; + +use kvdb_web::{Database, KeyValueDB as _}; + +use wasm_bindgen_test::*; +use wasm_bindgen::JsValue; + +wasm_bindgen_test_configure!(run_in_browser); + +#[wasm_bindgen_test(async)] +fn reopen_the_database_with_more_columns() -> impl futures01::Future { + let _ = console_log::init_with_level(log::Level::Trace); + + fn open_db(col: u32) -> impl future::Future { + Database::open("MyAsyncTest".into(), col) + .unwrap_or_else(|err| panic!("{}", err)) + } + + let fut = open_db(1).then(|db| { + // Write a value into the database + let mut batch = db.transaction(); + batch.put(None, b"hello", b"world"); + db.write_buffered(batch); + + assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + + // Check the database version + assert_eq!(db.version(), 1); + + // Close the database + drop(db); + + // Reopen it again with 3 columns + open_db(3) + }).map(|db| { + // The value should still be present + assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert!(db.get(None, b"trash").unwrap().is_none()); + + // The version should be bumped + assert_eq!(db.version(), 2); + + Ok(()) + }); + + compat::Compat::new(fut) +} From bf0ba8434f16a3700d0b445f48effa50c0e14b1a Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 25 Sep 2019 16:21:07 +0200 Subject: [PATCH 004/359] [kvdb-web] bump futures-preview to 0.3.0-alpha.18 (#225) --- kvdb-web/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 27ed6ecd0..0774f586d 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -13,7 +13,7 @@ wasm-bindgen = "0.2.49" js-sys = "0.3.26" kvdb = { version = "0.1", path = "../kvdb" } kvdb-memorydb = { version = "0.1", path = "../kvdb-memorydb" } -futures-preview = "0.3.0-alpha.17" +futures-preview = "0.3.0-alpha.18" log = "0.4.8" send_wrapper = "0.2.0" From 
28781a098cac6f559212a8d7d5386b58990525e3 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 26 Sep 2019 15:56:26 +0200 Subject: [PATCH 005/359] Bump parking_lot version (#227) * Bump parking_lot version Implies 0.2 of `kvdb-memorydb` and `kvdb-web`. [parity-ethereum PR]() * Update kvdb-memorydb/Cargo.toml Co-Authored-By: Andronik Ordian * Update kvdb-web/Cargo.toml Co-Authored-By: Andronik Ordian * Update kvdb-web/Cargo.toml --- kvdb-memorydb/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index d1dedb37b..22312e8c0 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,5 +8,5 @@ license = "GPL-3.0" edition = "2018" [dependencies] -parking_lot = "0.6" +parking_lot = "0.9" kvdb = { version = "0.1", path = "../kvdb" } From b0ec18dafed2d85a947d866a2ea0477ab2238a76 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade Date: Tue, 1 Oct 2019 10:28:34 +0300 Subject: [PATCH 006/359] Fix typos (#229) --- kvdb-rocksdb/src/lib.rs | 2 +- kvdb-web/src/indexed_db.rs | 2 +- rlp/src/stream.rs | 2 +- transaction-pool/src/lib.rs | 2 +- transaction-pool/src/listener.rs | 2 +- transaction-pool/src/pool.rs | 4 ++-- uint/Cargo.toml | 2 +- uint/src/uint.rs | 6 +++--- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index d5ab23829..34388d47c 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -526,7 +526,7 @@ impl Database { pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { self.iter_from_prefix(col, prefix).and_then(|mut iter| { match iter.next() { - // TODO: use prefix_same_as_start read option (not availabele in C API 
currently) + // TODO: use prefix_same_as_start read option (not available in C API currently) Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, _ => None } diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs index a75f940a6..09664a267 100644 --- a/kvdb-web/src/indexed_db.rs +++ b/kvdb-web/src/indexed_db.rs @@ -216,7 +216,7 @@ pub fn idb_cursor(idb: &IdbDatabase, col: u32) -> impl Stream, V .expect("The stores were created on open: {}; qed"); let store = txn.object_store(store_name).expect("Opening a store shouldn't fail; qed"); - let cursor = store.open_cursor().expect("Opening a cursor shoudn't fail; qed"); + let cursor = store.open_cursor().expect("Opening a cursor shouldn't fail; qed"); let (tx, rx) = channel::mpsc::unbounded(); diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index cfddc2d95..596470273 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -211,7 +211,7 @@ impl RlpStream { self } - /// Appends raw (pre-serialised) RLP data. Checks for size oveflow. + /// Appends raw (pre-serialised) RLP data. Checks for size overflow. pub fn append_raw_checked(&mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool { if self.estimate_size(bytes.len()) > max_size { return false; diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs index 8a8e3ffb0..669441829 100644 --- a/transaction-pool/src/lib.rs +++ b/transaction-pool/src/lib.rs @@ -51,7 +51,7 @@ //! 1. The pool groups transactions from particular sender together //! and stores them ordered by `Scoring` within that group //! i.e. `HashMap>`. -//! 2. Additionaly we maintain the best and the worst transaction from each sender +//! 2. Additionally we maintain the best and the worst transaction from each sender //! (by `Scoring` not `priority`) ordered by `priority`. //! It means that we can easily identify the best transaction inside the entire pool //! and the worst transaction. 
diff --git a/transaction-pool/src/listener.rs b/transaction-pool/src/listener.rs index 75b59bda6..a599c8763 100644 --- a/transaction-pool/src/listener.rs +++ b/transaction-pool/src/listener.rs @@ -21,7 +21,7 @@ use crate::error::Error; /// /// Listener is being notified about status of every transaction in the pool. pub trait Listener { - /// The transaction has been successfuly added to the pool. + /// The transaction has been successfully added to the pool. /// If second argument is `Some` the transaction has took place of some other transaction /// which was already in pool. /// NOTE: You won't be notified about drop of `old` transaction separately. diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs index 4e9bc35d2..efc12b23d 100644 --- a/transaction-pool/src/pool.rs +++ b/transaction-pool/src/pool.rs @@ -33,9 +33,9 @@ use crate::{ /// Internal representation of transaction. /// -/// Includes unique insertion id that can be used for scoring explictly, +/// Includes unique insertion id that can be used for scoring explicitly, /// but internally is used to resolve conflicts in case of equal scoring -/// (newer transactionsa are preferred). +/// (newer transactions are preferred). #[derive(Debug)] pub struct Transaction { /// Sequential id of the transaction diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 2a151f87e..e35d809b6 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "Large fixed-size integers arithmetics" +description = "Large fixed-size integer arithmetic" homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT/Apache-2.0" diff --git a/uint/src/uint.rs b/uint/src/uint.rs index c748d5a3f..cc58ee220 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -424,7 +424,7 @@ macro_rules! 
construct_uint { #[derive(Copy, Clone, Eq, PartialEq, Hash)] $visibility struct $name (pub [u64; $n_words]); - /// Get a reference to the underlying little-endian words. + /// Get a reference to the underlying little-endian words. impl AsRef<[u64]> for $name { #[inline] fn as_ref(&self) -> &[u64] { @@ -818,7 +818,7 @@ macro_rules! construct_uint { self.div_mod_knuth(other, n, m) } - /// Fast exponentation by squaring + /// Fast exponentiation by squaring /// https://en.wikipedia.org/wiki/Exponentiation_by_squaring /// /// # Panics @@ -849,7 +849,7 @@ macro_rules! construct_uint { x * y } - /// Fast exponentation by squaring. Returns result and overflow flag. + /// Fast exponentiation by squaring. Returns result and overflow flag. pub fn overflowing_pow(self, expon: Self) -> (Self, bool) { if expon.is_zero() { return (Self::one(), false) } From f884d77f250c5072ebac2d3a079c80ab444ec84f Mon Sep 17 00:00:00 2001 From: David Date: Tue, 1 Oct 2019 10:01:55 +0200 Subject: [PATCH 007/359] Fix deprecation warning (#168) * Fix warning * Edition * Bump version * Switch home-dir resolver to the home crate * Update parity-path/Cargo.toml Co-Authored-By: Andronik Ordian --- parity-path/Cargo.toml | 4 +++- parity-path/src/lib.rs | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml index e5f2d4f0e..b19b176c4 100644 --- a/parity-path/Cargo.toml +++ b/parity-path/Cargo.toml @@ -1,9 +1,11 @@ [package] name = "parity-path" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Path utilities" license = "GPL-3.0" +edition = "2018" [dependencies] +home = "0.5" diff --git a/parity-path/src/lib.rs b/parity-path/src/lib.rs index 38608db66..ffd4e9a0c 100644 --- a/parity-path/src/lib.rs +++ b/parity-path/src/lib.rs @@ -18,11 +18,13 @@ use std::path::Path; use std::path::PathBuf; +use home::home_dir; + #[cfg(target_os = "macos")] /// 
Get the config path for application `name`. /// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. pub fn config_path(name: &str) -> PathBuf { - let mut home = ::std::env::home_dir().expect("Failed to get home dir"); + let mut home = home_dir().expect("Failed to get home dir"); home.push("Library"); home.push(name); home @@ -32,7 +34,7 @@ pub fn config_path(name: &str) -> PathBuf { /// Get the config path for application `name`. /// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. pub fn config_path(name: &str) -> PathBuf { - let mut home = ::std::env::home_dir().expect("Failed to get home dir"); + let mut home = home_dir().expect("Failed to get home dir"); home.push("AppData"); home.push("Roaming"); home.push(name); @@ -43,7 +45,7 @@ pub fn config_path(name: &str) -> PathBuf { /// Get the config path for application `name`. /// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. pub fn config_path(name: &str) -> PathBuf { - let mut home = ::std::env::home_dir().expect("Failed to get home dir"); + let mut home = home_dir().expect("Failed to get home dir"); home.push(format!(".{}", name.to_lowercase())); home } From 1e9f990eb57abd1633bb7898d16abc473061f968 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 1 Oct 2019 12:50:18 +0200 Subject: [PATCH 008/359] update rand to 0.7 (breaking change) (#217) * update rand to 0.7 (breaking change) * [kvdb-rocksdb]: revert version bump * [keccak-hash]: bump version * [contract-address]: bump keccak-hash * [contract-address]: bump version `0.3.0` * [rlp]: downgrade to 0.2 --- contract-address/Cargo.toml | 6 +++--- ethbloom/Cargo.toml | 4 ++-- ethereum-types/Cargo.toml | 8 ++++---- fixed-hash/Cargo.toml | 7 +++++-- fixed-hash/src/hash.rs | 26 +++++++++++++------------- fixed-hash/src/lib.rs | 3 +++ fixed-hash/src/tests.rs | 3 ++- keccak-hash/Cargo.toml | 4 ++-- kvdb-rocksdb/Cargo.toml | 2 +- primitive-types/Cargo.toml | 4 ++-- rlp/Cargo.toml | 2 +- transaction-pool/Cargo.toml | 2 +- 
triehash/Cargo.toml | 2 +- 13 files changed, 40 insertions(+), 33 deletions(-) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 9ba26c51d..83cd3ed13 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "contract-address" -version = "0.2.0" +version = "0.3.0" authors = ["Parity Technologies "] license = "MIT" homepage = "https://github.com/paritytech/parity-common" @@ -11,9 +11,9 @@ edition = "2018" readme = "README.md" [dependencies] -ethereum-types = { version = "0.7", path = "../ethereum-types" } +ethereum-types = { version = "0.8", path = "../ethereum-types" } rlp = { version = "0.4", path = "../rlp" } -keccak-hash = { version = "0.3", path = "../keccak-hash", default-features = false } +keccak-hash = { version = "0.4", path = "../keccak-hash", default-features = false } [features] default = [] diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 5e3347991..829e2ac73 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] tiny-keccak = "1.5" crunchy = { version = "0.2", default-features = false, features = ["limit_256"] } -fixed-hash = { path = "../fixed-hash", version = "0.4", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 97dbc2e3b..ae5550d23 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.7.0" +version = "0.8.0" authors = 
["Parity Technologies "] license = "MIT" homepage = "https://github.com/paritytech/parity-common" @@ -8,10 +8,10 @@ description = "Ethereum types" edition = "2018" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.7", default-features = false } -fixed-hash = { path = "../fixed-hash", version = "0.4", default-features = false, features = ["byteorder", "rustc-hex"] } +ethbloom = { path = "../ethbloom", version = "0.8", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.5", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.6", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 9947ceaa2..1facbf088 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] license = "MIT" homepage = "https://github.com/paritytech/parity-common" @@ -13,12 +13,15 @@ readme = "README.md" features = ["quickcheck", "api-dummy"] [dependencies] -rand = { version = "0.5", optional = true, default-features = false } +rand = { version = "0.7", optional = true, default-features = false } rustc-hex = { version = "2.0", optional = true, default-features = false } quickcheck = { version = "0.7", optional = true } byteorder = { version = "1.2", optional = true, default-features = false } static_assertions = "0.2" +[dev-dependencies] 
+rand_xorshift = "0.2.0" + [target.'cfg(not(target_os = "unknown"))'.dependencies] libc = { version = "0.2", optional = true, default-features = false } diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 95c762848..f8ffcb426 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -67,7 +67,7 @@ macro_rules! construct_fixed_hash { impl<'a> From<&'a [u8; $n_bytes]> for $name { /// Constructs a hash type from the given reference - /// to the bytes array of fixed length. + /// to the bytes array of fixed length. /// /// # Note /// @@ -80,7 +80,7 @@ macro_rules! construct_fixed_hash { impl<'a> From<&'a mut [u8; $n_bytes]> for $name { /// Constructs a hash type from the given reference - /// to the mutable bytes array of fixed length. + /// to the mutable bytes array of fixed length. /// /// # Note /// @@ -328,7 +328,7 @@ macro_rules! construct_fixed_hash { } // Implementation for disabled byteorder crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -342,7 +342,7 @@ macro_rules! impl_byteorder_for_fixed_hash { } // Implementation for enabled byteorder crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -461,7 +461,7 @@ macro_rules! impl_byteorder_for_fixed_hash { } // Implementation for disabled rand crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -475,7 +475,7 @@ macro_rules! impl_rand_for_fixed_hash { } // Implementation for enabled rand crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -512,7 +512,7 @@ macro_rules! impl_rand_for_fixed_hash { /// Assign `self` to a cryptographically random value. pub fn randomize(&mut self) { - let mut rng = $crate::rand::rngs::EntropyRng::new(); + let mut rng = $crate::rand::rngs::OsRng; self.randomize_using(&mut rng); } @@ -538,7 +538,7 @@ macro_rules! 
impl_rand_for_fixed_hash { } // Implementation for disabled libc crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -566,7 +566,7 @@ macro_rules! impl_libc_for_fixed_hash { } // Implementation for enabled libc crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -612,7 +612,7 @@ macro_rules! impl_libc_for_fixed_hash { } // Implementation for disabled rustc-hex crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -626,7 +626,7 @@ macro_rules! impl_rustc_hex_for_fixed_hash { } // Implementation for enabled rustc-hex crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -667,7 +667,7 @@ macro_rules! impl_rustc_hex_for_fixed_hash { } // Implementation for disabled quickcheck crate support. -// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks @@ -681,7 +681,7 @@ macro_rules! impl_quickcheck_for_fixed_hash { } // Implementation for enabled quickcheck crate support. 
-// +// // # Note // // Feature guarded macro definitions instead of feature guarded impl blocks diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 061c33854..f215a771c 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -56,6 +56,9 @@ pub extern crate rand; #[doc(hidden)] pub extern crate quickcheck; +#[cfg(test)] +extern crate rand_xorshift; + #[macro_use] mod hash; diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index 66869d002..45e22927a 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -250,7 +250,8 @@ mod from_low_u64 { #[cfg(feature = "rand")] mod rand { use super::*; - use rand::{SeedableRng, XorShiftRng}; + use rand::SeedableRng; + use rand_xorshift::XorShiftRng; #[test] fn random() { diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 6a3229fb2..365ea5805 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.3.0" +version = "0.4.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." 
authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ edition = "2018" [dependencies] tiny-keccak = "1.4" -primitive-types = { path = "../primitive-types", version = "0.5", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.6", default-features = false } [dev-dependencies] tempdir = "0.3" diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index e18128662..76b2ed9ea 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -20,4 +20,4 @@ parity-rocksdb = "0.5" [dev-dependencies] tempdir = "0.3" -ethereum-types = { version = "0.7", path = "../ethereum-types" } +ethereum-types = { version = "0.8", path = "../ethereum-types" } diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 178826eef..a73b00644 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "primitive-types" -version = "0.5.1" +version = "0.6.0" authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" [dependencies] -fixed-hash = { version = "0.4", path = "../fixed-hash", default-features = false } +fixed-hash = { version = "0.5", path = "../fixed-hash", default-features = false } uint = { version = "0.8", path = "../uint", default-features = false } impl-serde = { version = "0.2.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 0ad6d40ea..83900e213 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -13,7 +13,7 @@ rustc-hex = { version = "2.0", default-features = false } [dev-dependencies] criterion = "0.3" hex-literal = "0.2" -primitive-types = { path = "../primitive-types", version = "0.5", features = ["impl-rlp"] } 
+primitive-types = { path = "../primitive-types", version = "0.6", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 0a2a213fb..fb442756c 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -13,4 +13,4 @@ smallvec = "0.6" trace-time = { path = "../trace-time", version = "0.1" } [dev-dependencies] -ethereum-types = { version = "0.7", path = "../ethereum-types" } +ethereum-types = { version = "0.8", path = "../ethereum-types" } diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 453815c6c..ff2133d13 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -14,10 +14,10 @@ rlp = { version = "0.4", path = "../rlp" } [dev-dependencies] criterion = "0.3" keccak-hasher = "0.15" +ethereum-types = { version = "0.8", path = "../ethereum-types" } tiny-keccak = "1.5" trie-standardmap = "0.15" hex-literal = "0.2" -ethereum-types = { version = "0.7", path = "../ethereum-types" } [[bench]] name = "triehash" From ec31cb06329eaa7d969cf1ef6df4b142a93ddb7a Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade Date: Tue, 1 Oct 2019 14:37:52 +0300 Subject: [PATCH 009/359] Rename complete_unbounded_list to finalize_unbounded_list (#228) * Rename complete_unbounded_list as finalize_unbounded_list * Use finalize_unbounded_list instead of complete_unbounded_list * Keep documentation and use the deprecation attribute Co-Authored-By: Andronik Ordian * "note" not "reason" Co-Authored-By: Andronik Ordian --- rlp/src/stream.rs | 9 ++++++++- rlp/tests/tests.rs | 6 +++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 596470273..1a4fb0d66 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -334,7 +334,7 @@ impl RlpStream { } /// Finalize current unbounded list. Panics if no unbounded list has been opened. 
- pub fn complete_unbounded_list(&mut self) { + pub fn finalize_unbounded_list(&mut self) { let list = self.unfinished_lists.pop().expect("No open list."); if list.max.is_some() { panic!("List type mismatch."); @@ -344,6 +344,13 @@ impl RlpStream { self.note_appended(1); self.finished_list = true; } + + /// Finalize current unbounded list. Panics if no unbounded list has been opened. + #[deprecated(since = "0.4.3", note = "use finalize_unbounded_list instead")] + pub fn complete_unbounded_list(&mut self) { + self.finalize_unbounded_list(); + } + } pub struct BasicEncoder<'a> { diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index ba5a423b7..07dd3fcc5 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -434,7 +434,7 @@ fn test_rlp_stream_unbounded_list() { stream.append(&40u32); stream.append(&41u32); assert!(!stream.is_finished()); - stream.complete_unbounded_list(); + stream.finalize_unbounded_list(); assert!(stream.is_finished()); } @@ -504,7 +504,7 @@ fn test_nested_list_roundtrip() { s.begin_unbounded_list() .append(&self.0) .append(&self.1) - .complete_unbounded_list(); + .finalize_unbounded_list(); } } @@ -521,7 +521,7 @@ fn test_nested_list_roundtrip() { fn rlp_append(&self, s: &mut RlpStream) { s.begin_unbounded_list() .append_list(&self.0) - .complete_unbounded_list(); + .finalize_unbounded_list(); } } From a185a390583ce9074fb142cb0cea12b573c6bfc7 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade Date: Tue, 1 Oct 2019 14:55:10 +0300 Subject: [PATCH 010/359] Use default Xenial dist (#230) --- .travis.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index cd1563c4b..1a86532ea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,17 +5,10 @@ branches: matrix: include: - os: linux -# without this line -# travis downgrades the image to trusty somehow -# which doesn't have C11 support -# see https://travis-ci.org/paritytech/parity-common/jobs/557850274 - dist: xenial rust: stable - os: linux - dist: xenial rust: beta - 
os: linux - dist: xenial rust: nightly - os: osx osx_image: xcode11 From b2781fa0b877d9d219cb3333d88d1344678da0ba Mon Sep 17 00:00:00 2001 From: Saurav Sharma Date: Tue, 1 Oct 2019 17:54:19 +0545 Subject: [PATCH 011/359] [trace-time] migrate code to 2018 edition (#232) * Update to 2018 edition for trace-time Signed-off-by: Saurav Sharma * Update drop function to use as_millis method Signed-off-by: Saurav Sharma --- trace-time/Cargo.toml | 1 + trace-time/src/lib.rs | 9 +++------ 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml index 383d5d3cc..5dad32a52 100644 --- a/trace-time/Cargo.toml +++ b/trace-time/Cargo.toml @@ -5,6 +5,7 @@ version = "0.1.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" license = "GPL-3.0" +edition = "2018" [dependencies] log = "0.4" diff --git a/trace-time/src/lib.rs b/trace-time/src/lib.rs index 4c3b0b274..7a832656e 100644 --- a/trace-time/src/lib.rs +++ b/trace-time/src/lib.rs @@ -16,16 +16,14 @@ //! Performance timer with logging -#[macro_use] -extern crate log; - +use log::trace; use std::time::Instant; #[macro_export] macro_rules! trace_time { ($name: expr) => { let _timer = $crate::PerfTimer::new($name); - } + }; } /// Performance timer with logging. 
Starts measuring time in the constructor, prints @@ -48,8 +46,7 @@ impl PerfTimer { impl Drop for PerfTimer { fn drop(&mut self) { let elapsed = self.start.elapsed(); - let ms = elapsed.subsec_nanos() as f32 / 1_000_000.0 + - elapsed.as_secs() as f32 * 1_000.0; + let ms = elapsed.as_millis(); trace!(target: "perf", "{}: {:.2}ms", self.name, ms); } } From c2f593c5356e42718e4ea2c65179ba5d7f62d632 Mon Sep 17 00:00:00 2001 From: Saurav Sharma Date: Tue, 1 Oct 2019 22:33:59 +0545 Subject: [PATCH 012/359] [parity-util-mem] migrate code to 2018 edition (#231) * Update to 2018 edition for parity-util-mem Signed-off-by: Saurav Sharma * update missing use statement Signed-off-by: Saurav Sharma * add no_std to ci and use a rstd alias. * Remove 2 files from parity-util-mem Signed-off-by: Saurav Sharma --- .travis.yml | 1 + parity-util-mem/Cargo.toml | 1 + parity-util-mem/get_malloc_size_src.sh | 12 - parity-util-mem/slim_malloc_size_of.patch | 746 ---------------------- parity-util-mem/src/allocators.rs | 11 +- parity-util-mem/src/impls.rs | 10 +- parity-util-mem/src/lib.rs | 18 +- parity-util-mem/src/malloc_size.rs | 39 +- 8 files changed, 34 insertions(+), 804 deletions(-) delete mode 100755 parity-util-mem/get_malloc_size_src.sh delete mode 100644 parity-util-mem/slim_malloc_size_of.patch diff --git a/.travis.yml b/.travis.yml index 1a86532ea..5bedd7f7d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,6 +33,7 @@ script: - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. - cd parity-util-mem/ && cargo test --features=jemalloc-global && cd .. - cd parity-util-mem/ && cargo test --features=mimalloc-global && cd .. + - cd parity-util-mem/ && cargo test --no-default-features --features=dlmalloc-global && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. 
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 8462e65e2..a73d23c0a 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" license = "GPL-3.0" +edition = "2018" [dependencies] cfg-if = "0.1.6" diff --git a/parity-util-mem/get_malloc_size_src.sh b/parity-util-mem/get_malloc_size_src.sh deleted file mode 100755 index 6eb52131f..000000000 --- a/parity-util-mem/get_malloc_size_src.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# script/process to update code from servo project (malloc_size_of) -# untested, note that we do not use submodule due to size of git repo -git clone https://github.com/servo/servo.git -cd servo -git checkout 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc -git apply ../slim_malloc_size_of.patch -#git merge master -#cp components/malloc_size_of/lib.rs ../src/malloc_size.rs -#cd .. -#rm -rf ./servo diff --git a/parity-util-mem/slim_malloc_size_of.patch b/parity-util-mem/slim_malloc_size_of.patch deleted file mode 100644 index 7c31cf606..000000000 --- a/parity-util-mem/slim_malloc_size_of.patch +++ /dev/null @@ -1,746 +0,0 @@ -diff --git a/components/malloc_size_of/lib.rs b/components/malloc_size_of/lib.rs -index 778082b5f0..e13745d6af 100644 ---- a/components/malloc_size_of/lib.rs -+++ b/components/malloc_size_of/lib.rs -@@ -43,55 +43,39 @@ - //! measured as well as the thing it points to. E.g. - //! ` as MallocSizeOf>::size_of(field, ops)`. 
- --extern crate app_units; --#[cfg(feature = "servo")] --extern crate crossbeam_channel; --extern crate cssparser; --extern crate euclid; --extern crate hashglobe; --#[cfg(feature = "servo")] --extern crate hyper; --#[cfg(feature = "servo")] --extern crate hyper_serde; --#[cfg(feature = "servo")] --extern crate keyboard_types; --#[cfg(feature = "servo")] --extern crate mozjs as js; --extern crate selectors; --#[cfg(feature = "servo")] --extern crate serde; --#[cfg(feature = "servo")] --extern crate serde_bytes; --extern crate servo_arc; --extern crate smallbitvec; --extern crate smallvec; --#[cfg(feature = "servo")] --extern crate string_cache; --extern crate thin_slice; --#[cfg(feature = "servo")] --extern crate time; --#[cfg(feature = "url")] --extern crate url; --extern crate void; --#[cfg(feature = "webrender_api")] --extern crate webrender_api; --#[cfg(feature = "servo")] --extern crate xml5ever; -- --#[cfg(feature = "servo")] --use serde_bytes::ByteBuf; -+ -+// This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT. -+ -+ -+#[cfg(not(feature = "std"))] -+use alloc::vec::Vec; -+#[cfg(not(feature = "std"))] -+use alloc::string::String; -+#[cfg(not(feature = "std"))] -+mod std { -+ pub use core::*; -+ pub use alloc::collections; -+} -+ -+#[cfg(feature = "std")] -+use std::sync::Arc; -+ - use std::hash::{BuildHasher, Hash}; - use std::mem::size_of; - use std::ops::Range; - use std::ops::{Deref, DerefMut}; -+#[cfg(feature = "std")] - use std::os::raw::c_void; --use void::Void; -+#[cfg(not(feature = "std"))] -+use core::ffi::c_void; -+#[cfg(not(feature = "std"))] -+pub use alloc::boxed::Box; - - /// A C function that takes a pointer to a heap allocation and returns its size. --type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; -+pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; - - /// A closure implementing a stateful predicate on pointers. 
--type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool; -+pub type VoidPtrToBoolFnMut = FnMut(*const c_void) -> bool; - - /// Operations used when measuring heap usage of data structures. - pub struct MallocSizeOfOps { -@@ -216,44 +200,62 @@ pub trait MallocConditionalShallowSizeOf { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; - } - --impl MallocSizeOf for String { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -+#[cfg(not(any( -+ all( -+ target_os = "macos", -+ not(feature = "jemalloc-global"), -+ ), -+ feature = "estimate-heapsize" -+)))] -+pub mod inner_allocator_use { -+ -+use super::*; -+ -+impl MallocShallowSizeOf for Box { -+ fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -+ unsafe { ops.malloc_size_of(&**self) } - } - } - --impl<'a, T: ?Sized> MallocSizeOf for &'a T { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- // Zero makes sense for a non-owning reference. 
-- 0 -+impl MallocShallowSizeOf for Vec { -+ fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -+ unsafe { ops.malloc_size_of(self.as_ptr()) } - } - } - --impl MallocShallowSizeOf for Box { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(&**self) } -+// currently this seems only fine with jemalloc -+#[cfg(feature = "std")] -+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global"))] -+impl MallocUnconditionalShallowSizeOf for Arc { -+ fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -+ unsafe { ops.malloc_size_of(arc_ptr(self)) } - } - } - --impl MallocSizeOf for Box { -+#[cfg(feature = "std")] -+#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android", feature = "jemalloc-global")))] -+impl MallocUnconditionalShallowSizeOf for Arc { -+ fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -+ size_of::() -+ } -+} -+ -+} -+ -+impl MallocSizeOf for String { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.shallow_size_of(ops) + (**self).size_of(ops) -+ unsafe { ops.malloc_size_of(self.as_ptr()) } - } - } - --impl MallocShallowSizeOf for thin_slice::ThinBoxedSlice { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = 0; -- unsafe { -- n += thin_slice::ThinBoxedSlice::spilled_storage(self) -- .map_or(0, |ptr| ops.malloc_size_of(ptr)); -- n += ops.malloc_size_of(&**self); -- } -- n -+impl<'a, T: ?Sized> MallocSizeOf for &'a T { -+ fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -+ // Zero makes sense for a non-owning reference. 
-+ 0 - } - } - --impl MallocSizeOf for thin_slice::ThinBoxedSlice { -+impl MallocSizeOf for Box { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.shallow_size_of(ops) + (**self).size_of(ops) - } -@@ -329,6 +331,7 @@ impl MallocSizeOf for std::cell::RefCell { - } - } - -+#[cfg(feature = "std")] - impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B> - where - B::Owned: MallocSizeOf, -@@ -351,30 +354,6 @@ impl MallocSizeOf for [T] { - } - } - --#[cfg(feature = "servo")] --impl MallocShallowSizeOf for ByteBuf { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -- } --} -- --#[cfg(feature = "servo")] --impl MallocSizeOf for ByteBuf { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for elem in self.iter() { -- n += elem.size_of(ops); -- } -- n -- } --} -- --impl MallocShallowSizeOf for Vec { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -- } --} -- - impl MallocSizeOf for Vec { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); -@@ -412,30 +391,7 @@ impl MallocSizeOf for std::collections::VecDeque { - } - } - --impl MallocShallowSizeOf for smallvec::SmallVec { -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if self.spilled() { -- unsafe { ops.malloc_size_of(self.as_ptr()) } -- } else { -- 0 -- } -- } --} -- --impl MallocSizeOf for smallvec::SmallVec --where -- A: smallvec::Array, -- A::Item: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for elem in self.iter() { -- n += elem.size_of(ops); -- } -- n -- } --} -- -+#[cfg(feature = "std")] - impl MallocShallowSizeOf for std::collections::HashSet - where - T: Eq + Hash, -@@ -457,6 +413,7 @@ where - } - } - -+#[cfg(feature = "std")] - impl MallocSizeOf for 
std::collections::HashSet - where - T: Eq + Hash + MallocSizeOf, -@@ -471,59 +428,7 @@ where - } - } - --impl MallocShallowSizeOf for hashglobe::hash_set::HashSet --where -- T: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- // See the implementation for std::collections::HashSet for details. -- if ops.has_malloc_enclosing_size_of() { -- self.iter() -- .next() -- .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) -- } else { -- self.capacity() * (size_of::() + size_of::()) -- } -- } --} -- --impl MallocSizeOf for hashglobe::hash_set::HashSet --where -- T: Eq + Hash + MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for t in self.iter() { -- n += t.size_of(ops); -- } -- n -- } --} -- --impl MallocShallowSizeOf for hashglobe::fake::HashSet --where -- T: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().shallow_size_of(ops) -- } --} -- --impl MallocSizeOf for hashglobe::fake::HashSet --where -- T: Eq + Hash + MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().size_of(ops) -- } --} -- -+#[cfg(feature = "std")] - impl MallocShallowSizeOf for std::collections::HashMap - where - K: Eq + Hash, -@@ -541,6 +446,7 @@ where - } - } - -+#[cfg(feature = "std")] - impl MallocSizeOf for std::collections::HashMap - where - K: Eq + Hash + MallocSizeOf, -@@ -587,62 +493,6 @@ where - } - } - --impl MallocShallowSizeOf for hashglobe::hash_map::HashMap --where -- K: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- // See the implementation for std::collections::HashSet for details. 
-- if ops.has_malloc_enclosing_size_of() { -- self.values() -- .next() -- .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) -- } else { -- self.capacity() * (size_of::() + size_of::() + size_of::()) -- } -- } --} -- --impl MallocSizeOf for hashglobe::hash_map::HashMap --where -- K: Eq + Hash + MallocSizeOf, -- V: MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = self.shallow_size_of(ops); -- for (k, v) in self.iter() { -- n += k.size_of(ops); -- n += v.size_of(ops); -- } -- n -- } --} -- --impl MallocShallowSizeOf for hashglobe::fake::HashMap --where -- K: Eq + Hash, -- S: BuildHasher, --{ -- fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().shallow_size_of(ops) -- } --} -- --impl MallocSizeOf for hashglobe::fake::HashMap --where -- K: Eq + Hash + MallocSizeOf, -- V: MallocSizeOf, -- S: BuildHasher, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use std::ops::Deref; -- self.deref().size_of(ops) -- } --} -- - // PhantomData is always 0. 
- impl MallocSizeOf for std::marker::PhantomData { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -@@ -657,21 +507,22 @@ impl MallocSizeOf for std::marker::PhantomData { - //impl !MallocSizeOf for Arc { } - //impl !MallocShallowSizeOf for Arc { } - --impl MallocUnconditionalShallowSizeOf for servo_arc::Arc { -- fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- unsafe { ops.malloc_size_of(self.heap_ptr()) } -- } -+#[cfg(feature = "std")] -+fn arc_ptr(s: &Arc) -> * const T { -+ &(**s) as *const T - } - --impl MallocUnconditionalSizeOf for servo_arc::Arc { -+#[cfg(feature = "std")] -+impl MallocUnconditionalSizeOf for Arc { - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) - } - } - --impl MallocConditionalShallowSizeOf for servo_arc::Arc { -+#[cfg(feature = "std")] -+impl MallocConditionalShallowSizeOf for Arc { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if ops.have_seen_ptr(self.heap_ptr()) { -+ if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_shallow_size_of(ops) -@@ -679,9 +530,10 @@ impl MallocConditionalShallowSizeOf for servo_arc::Arc { - } - } - --impl MallocConditionalSizeOf for servo_arc::Arc { -+#[cfg(feature = "std")] -+impl MallocConditionalSizeOf for Arc { - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if ops.have_seen_ptr(self.heap_ptr()) { -+ if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_size_of(ops) -@@ -695,203 +547,13 @@ impl MallocConditionalSizeOf for servo_arc::Arc { - /// If a mutex is stored inside of an Arc value as a member of a data type that is being measured, - /// the Arc will not be automatically measured so there is no risk of overcounting the mutex's - /// contents. 
-+#[cfg(feature = "std")] - impl MallocSizeOf for std::sync::Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock().unwrap()).size_of(ops) - } - } - --impl MallocSizeOf for smallbitvec::SmallBitVec { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- if let Some(ptr) = self.heap_ptr() { -- unsafe { ops.malloc_size_of(ptr) } -- } else { -- 0 -- } -- } --} -- --impl MallocSizeOf for euclid::Length { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.0.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedScale { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.0.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedPoint2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.x.size_of(ops) + self.y.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedRect { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.origin.size_of(ops) + self.size.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedSideOffsets2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.top.size_of(ops) + -- self.right.size_of(ops) + -- self.bottom.size_of(ops) + -- self.left.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedSize2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.width.size_of(ops) + self.height.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedTransform2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.m11.size_of(ops) + -- self.m12.size_of(ops) + -- self.m21.size_of(ops) + -- self.m22.size_of(ops) + -- self.m31.size_of(ops) + -- self.m32.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedTransform3D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.m11.size_of(ops) + -- self.m12.size_of(ops) + -- self.m13.size_of(ops) + -- self.m14.size_of(ops) + -- self.m21.size_of(ops) + -- self.m22.size_of(ops) + -- self.m23.size_of(ops) + -- 
self.m24.size_of(ops) + -- self.m31.size_of(ops) + -- self.m32.size_of(ops) + -- self.m33.size_of(ops) + -- self.m34.size_of(ops) + -- self.m41.size_of(ops) + -- self.m42.size_of(ops) + -- self.m43.size_of(ops) + -- self.m44.size_of(ops) -- } --} -- --impl MallocSizeOf for euclid::TypedVector2D { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.x.size_of(ops) + self.y.size_of(ops) -- } --} -- --impl MallocSizeOf for selectors::parser::AncestorHashes { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let selectors::parser::AncestorHashes { ref packed_hashes } = *self; -- packed_hashes.size_of(ops) -- } --} -- --impl MallocSizeOf for selectors::parser::Selector --where -- Impl::NonTSPseudoClass: MallocSizeOf, -- Impl::PseudoElement: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- let mut n = 0; -- -- // It's OK to measure this ThinArc directly because it's the -- // "primary" reference. (The secondary references are on the -- // Stylist.) -- n += unsafe { ops.malloc_size_of(self.thin_arc_heap_ptr()) }; -- for component in self.iter_raw_match_order() { -- n += component.size_of(ops); -- } -- -- n -- } --} -- --impl MallocSizeOf for selectors::parser::Component --where -- Impl::NonTSPseudoClass: MallocSizeOf, -- Impl::PseudoElement: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- use selectors::parser::Component; -- -- match self { -- Component::AttributeOther(ref attr_selector) => attr_selector.size_of(ops), -- Component::Negation(ref components) => components.size_of(ops), -- Component::NonTSPseudoClass(ref pseudo) => (*pseudo).size_of(ops), -- Component::Slotted(ref selector) | Component::Host(Some(ref selector)) => { -- selector.size_of(ops) -- }, -- Component::PseudoElement(ref pseudo) => (*pseudo).size_of(ops), -- Component::Combinator(..) | -- Component::ExplicitAnyNamespace | -- Component::ExplicitNoNamespace | -- Component::DefaultNamespace(..) 
| -- Component::Namespace(..) | -- Component::ExplicitUniversalType | -- Component::LocalName(..) | -- Component::ID(..) | -- Component::Class(..) | -- Component::AttributeInNoNamespaceExists { .. } | -- Component::AttributeInNoNamespace { .. } | -- Component::FirstChild | -- Component::LastChild | -- Component::OnlyChild | -- Component::Root | -- Component::Empty | -- Component::Scope | -- Component::NthChild(..) | -- Component::NthLastChild(..) | -- Component::NthOfType(..) | -- Component::NthLastOfType(..) | -- Component::FirstOfType | -- Component::LastOfType | -- Component::OnlyOfType | -- Component::Host(None) => 0, -- } -- } --} -- --impl MallocSizeOf -- for selectors::attr::AttrSelectorWithOptionalNamespace --{ -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --impl MallocSizeOf for Void { -- #[inline] -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- void::unreachable(*self) -- } --} -- --#[cfg(feature = "servo")] --impl MallocSizeOf for string_cache::Atom { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --// This is measured properly by the heap measurement implemented in --// SpiderMonkey. --#[cfg(feature = "servo")] --impl MallocSizeOf for js::jsapi::Heap { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --/// For use on types where size_of() returns 0. - #[macro_export] - macro_rules! 
malloc_size_of_is_0( - ($($ty:ty),+) => ( -@@ -929,117 +591,6 @@ malloc_size_of_is_0!(Range, Range, Range, Range, Range - malloc_size_of_is_0!(Range, Range, Range, Range, Range); - malloc_size_of_is_0!(Range, Range); - --malloc_size_of_is_0!(app_units::Au); -- --malloc_size_of_is_0!(cssparser::RGBA, cssparser::TokenSerializationType); -- --#[cfg(feature = "url")] --impl MallocSizeOf for url::Host { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- match *self { -- url::Host::Domain(ref s) => s.size_of(ops), -- _ => 0, -- } -- } --} --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::BorderRadius); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::BorderStyle); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::BoxShadowClipMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ClipAndScrollInfo); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ColorF); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ComplexClipRegion); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ExtendMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::FilterOp); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ExternalScrollId); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::FontInstanceKey); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::GradientStop); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::GlyphInstance); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::NinePatchBorder); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ImageKey); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ImageRendering); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::LineStyle); --#[cfg(feature = 
"webrender_api")] --malloc_size_of_is_0!(webrender_api::MixBlendMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::NormalBorder); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::RepeatMode); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::ScrollSensitivity); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::StickyOffsetBounds); --#[cfg(feature = "webrender_api")] --malloc_size_of_is_0!(webrender_api::TransformStyle); -- --#[cfg(feature = "servo")] --impl MallocSizeOf for keyboard_types::Key { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- match self { -- keyboard_types::Key::Character(ref s) => s.size_of(ops), -- _ => 0, -- } -- } --} -- --#[cfg(feature = "servo")] --malloc_size_of_is_0!(keyboard_types::Modifiers); -- --#[cfg(feature = "servo")] --impl MallocSizeOf for xml5ever::QualName { -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.prefix.size_of(ops) + self.ns.size_of(ops) + self.local.size_of(ops) -- } --} -- --#[cfg(feature = "servo")] --malloc_size_of_is_0!(time::Duration); --#[cfg(feature = "servo")] --malloc_size_of_is_0!(time::Tm); -- --#[cfg(feature = "servo")] --impl MallocSizeOf for hyper_serde::Serde --where -- for<'de> hyper_serde::De: serde::Deserialize<'de>, -- for<'a> hyper_serde::Ser<'a, T>: serde::Serialize, -- T: MallocSizeOf, --{ -- fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { -- self.0.size_of(ops) -- } --} -- --// Placeholder for unique case where internals of Sender cannot be measured. --// malloc size of is 0 macro complains about type supplied! 
--#[cfg(feature = "servo")] --impl MallocSizeOf for crossbeam_channel::Sender { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- --#[cfg(feature = "servo")] --impl MallocSizeOf for hyper::StatusCode { -- fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { -- 0 -- } --} -- - /// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a - /// struct. - #[derive(Clone)] diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index 2379c97d9..9193dab9c 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -42,9 +42,9 @@ //! - mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) -use malloc_size::{MallocSizeOfOps, VoidPtrToSizeFn, MallocSizeOf}; +use crate::malloc_size::{MallocSizeOfOps, VoidPtrToSizeFn, MallocSizeOf}; #[cfg(feature = "std")] -use malloc_size::MallocUnconditionalSizeOf; +use crate::malloc_size::MallocUnconditionalSizeOf; #[cfg(feature = "std")] use std::os::raw::c_void; #[cfg(not(feature = "std"))] @@ -54,7 +54,7 @@ mod usable_size { use super::*; -cfg_if! { +cfg_if::cfg_if! { if #[cfg(any( target_arch = "wasm32", @@ -74,10 +74,7 @@ cfg_if! { } else if #[cfg(target_os = "windows")] { - // default windows allocator - extern crate winapi; - - use self::winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; + use winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; /// Get the size of a heap block. /// Call windows allocator through `winapi` crate diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs index 19e787ba3..b6fd44d97 100644 --- a/parity-util-mem/src/impls.rs +++ b/parity-util-mem/src/impls.rs @@ -19,16 +19,12 @@ //! - elastic_array arrays //! 
- parking_lot mutex structures -extern crate elastic_array; -extern crate ethereum_types; -extern crate parking_lot; - -use self::ethereum_types::{ +use ethereum_types::{ U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom }; -use self::elastic_array::{ +use elastic_array::{ ElasticArray2, ElasticArray4, ElasticArray8, @@ -42,7 +38,7 @@ use self::elastic_array::{ ElasticArray1024, ElasticArray2048, }; -use self::parking_lot::{Mutex, RwLock}; +use parking_lot::{Mutex, RwLock}; use super::{MallocSizeOf, MallocSizeOfOps}; #[cfg(not(feature = "std"))] diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 2114cddba..b55e5dc9e 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -19,36 +19,27 @@ //! memory erasure. #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(core_intrinsics))] -#![cfg_attr(not(feature = "std"), feature(alloc))] - - -#[macro_use] -extern crate cfg_if; #[cfg(not(feature = "std"))] extern crate alloc; -extern crate malloc_size_of_derive as malloc_size_derive; +use malloc_size_of_derive as malloc_size_derive; -cfg_if! { +cfg_if::cfg_if! { if #[cfg(all( feature = "jemalloc-global", not(target_os = "windows"), not(target_arch = "wasm32") ))] { - extern crate jemallocator; #[global_allocator] /// Global allocator pub static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; } else if #[cfg(feature = "dlmalloc-global")] { - extern crate dlmalloc; #[global_allocator] /// Global allocator pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; } else if #[cfg(feature = "weealloc-global")] { - extern crate wee_alloc; #[global_allocator] /// Global allocator pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; @@ -56,8 +47,6 @@ cfg_if! 
{ feature = "mimalloc-global", not(target_arch = "wasm32") ))] { - extern crate mimallocator; - extern crate mimalloc_sys; #[global_allocator] /// Global allocator pub static ALLOC: mimallocator::Mimalloc = mimallocator::Mimalloc; @@ -77,9 +66,6 @@ pub mod allocators; ))] pub mod sizeof; -#[cfg(not(feature = "std"))] -use core as std; - /// This is a copy of patched crate `malloc_size_of` as a module. /// We need to have it as an inner module to be able to define our own traits implementation, /// if at some point the trait become standard enough we could use the right way of doing it diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index e7599ea85..8c2e06deb 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -49,10 +49,17 @@ #[cfg(not(feature = "std"))] use alloc::vec::Vec; +#[cfg(feature = "std")] +mod rstd { + pub use std::*; +} #[cfg(not(feature = "std"))] -mod std { +mod rstd { pub use core::*; - pub use alloc::collections; + pub mod collections { + pub use alloc::collections::*; + pub use vec_deque::VecDeque; + } } #[cfg(feature = "std")] @@ -60,10 +67,10 @@ use std::sync::Arc; #[cfg(feature = "std")] use std::hash::BuildHasher; -use std::hash::Hash; -use std::mem::size_of; -use std::ops::Range; -use std::ops::{Deref, DerefMut}; +use rstd::hash::Hash; +use rstd::mem::size_of; +use rstd::ops::Range; +use rstd::ops::{Deref, DerefMut}; #[cfg(feature = "std")] use std::os::raw::c_void; #[cfg(not(feature = "std"))] @@ -322,13 +329,13 @@ impl MallocSizeOf for Result { } } -impl MallocSizeOf for std::cell::Cell { +impl MallocSizeOf for rstd::cell::Cell { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { self.get().size_of(ops) } } -impl MallocSizeOf for std::cell::RefCell { +impl MallocSizeOf for rstd::cell::RefCell { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { self.borrow().size_of(ops) } @@ -367,7 +374,7 @@ impl MallocSizeOf for Vec { } } -impl MallocShallowSizeOf for 
std::collections::VecDeque { +impl MallocShallowSizeOf for rstd::collections::VecDeque { fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { if ops.has_malloc_enclosing_size_of() { if let Some(front) = self.front() { @@ -384,7 +391,7 @@ impl MallocShallowSizeOf for std::collections::VecDeque { } } -impl MallocSizeOf for std::collections::VecDeque { +impl MallocSizeOf for rstd::collections::VecDeque { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); for elem in self.iter() { @@ -466,7 +473,7 @@ where } } -impl MallocShallowSizeOf for std::collections::BTreeMap +impl MallocShallowSizeOf for rstd::collections::BTreeMap where K: Eq + Hash, { @@ -481,7 +488,7 @@ where } } -impl MallocSizeOf for std::collections::BTreeMap +impl MallocSizeOf for rstd::collections::BTreeMap where K: Eq + Hash + MallocSizeOf, V: MallocSizeOf, @@ -497,7 +504,7 @@ where } // PhantomData is always 0. -impl MallocSizeOf for std::marker::PhantomData { +impl MallocSizeOf for rstd::marker::PhantomData { fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { 0 } @@ -586,9 +593,9 @@ malloc_size_of_is_0!(u8, u16, u32, u64, u128, usize); malloc_size_of_is_0!(i8, i16, i32, i64, i128, isize); malloc_size_of_is_0!(f32, f64); -malloc_size_of_is_0!(std::sync::atomic::AtomicBool); -malloc_size_of_is_0!(std::sync::atomic::AtomicIsize); -malloc_size_of_is_0!(std::sync::atomic::AtomicUsize); +malloc_size_of_is_0!(rstd::sync::atomic::AtomicBool); +malloc_size_of_is_0!(rstd::sync::atomic::AtomicIsize); +malloc_size_of_is_0!(rstd::sync::atomic::AtomicUsize); malloc_size_of_is_0!(Range, Range, Range, Range, Range); malloc_size_of_is_0!(Range, Range, Range, Range, Range); From fe54a473ec8b320223bf17be7a16c42a3629615a Mon Sep 17 00:00:00 2001 From: Ashley Date: Tue, 8 Oct 2019 00:25:11 +1300 Subject: [PATCH 013/359] [fixed-hash] Migrated code to the 2018 edition and updated dependencies (#223) * Migrated code and updated dependencies * . * Revert "." 
This reverts commit 57ae6b5a5d880318ac1054383eae8008bab5a21b. * Downgrade rand to 0.6 due to breaking changes with 0.7 * Update fixed-hash/Cargo.toml Co-Authored-By: Andronik Ordian * Fixed some things * fix comment --- fixed-hash/Cargo.toml | 5 +++-- fixed-hash/src/lib.rs | 17 ++++++++--------- fixed-hash/src/tests.rs | 23 +++++++++++------------ 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 1facbf088..8672c6d31 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -8,6 +8,7 @@ repository = "https://github.com/paritytech/parity-common" description = "Macros to define custom fixed-size hash types" documentation = "https://docs.rs/fixed-hash/" readme = "README.md" +edition = "2018" [package.metadata.docs.rs] features = ["quickcheck", "api-dummy"] @@ -15,9 +16,9 @@ features = ["quickcheck", "api-dummy"] [dependencies] rand = { version = "0.7", optional = true, default-features = false } rustc-hex = { version = "2.0", optional = true, default-features = false } -quickcheck = { version = "0.7", optional = true } +quickcheck = { version = "0.9", optional = true } byteorder = { version = "1.2", optional = true, default-features = false } -static_assertions = "0.2" +static_assertions = "0.3" [dev-dependencies] rand_xorshift = "0.2.0" diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index f215a771c..9c841f885 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -15,21 +15,20 @@ pub extern crate alloc as alloc_; // Re-export libcore using an alias so that the macros can work without -// requiring `extern crate core` downstream. +// requiring `use core` downstream. 
#[doc(hidden)] -pub extern crate core as core_; +pub use core as core_; #[cfg(all(feature = "libc", not(target_os = "unknown")))] #[doc(hidden)] -pub extern crate libc; +pub use libc; // This disables a warning for unused #[macro_use(..)] // which is incorrect since the compiler does not check // for all available configurations. #[allow(unused_imports)] -#[macro_use(const_assert)] #[doc(hidden)] -pub extern crate static_assertions; +pub use static_assertions; // Export `const_assert` macro so that users of this crate do not // have to import the `static_assertions` crate themselves. @@ -38,7 +37,7 @@ pub use static_assertions::const_assert; #[cfg(feature = "byteorder")] #[doc(hidden)] -pub extern crate byteorder; +pub use byteorder; #[cfg(not(feature = "libc"))] #[doc(hidden)] @@ -46,15 +45,15 @@ pub mod libc {} #[cfg(feature = "rustc-hex")] #[doc(hidden)] -pub extern crate rustc_hex; +pub use rustc_hex; #[cfg(feature = "rand")] #[doc(hidden)] -pub extern crate rand; +pub use rand; #[cfg(feature = "quickcheck")] #[doc(hidden)] -pub extern crate quickcheck; +pub use quickcheck; #[cfg(test)] extern crate rand_xorshift; diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index 45e22927a..b1445f3e4 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -250,30 +250,29 @@ mod from_low_u64 { #[cfg(feature = "rand")] mod rand { use super::*; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; + use ::rand::{SeedableRng, rngs::StdRng}; #[test] fn random() { - let default_seed = ::Seed::default(); - let mut rng = XorShiftRng::from_seed(default_seed); + let default_seed = ::Seed::default(); + let mut rng = StdRng::from_seed(default_seed); assert_eq!( H32::random_using(&mut rng), - H32::from([0x43, 0xCA, 0x64, 0xED]) + H32::from([0x76, 0xa0, 0x40, 0x53]) ); } #[test] fn randomize() { - let default_seed = ::Seed::default(); - let mut rng = XorShiftRng::from_seed(default_seed); + let default_seed = ::Seed::default(); + let mut rng = 
StdRng::from_seed(default_seed); assert_eq!( { let mut ret = H32::zero(); ret.randomize_using(&mut rng); ret }, - H32::from([0x43, 0xCA, 0x64, 0xED]) + H32::from([0x76, 0xa0, 0x40, 0x53]) ) } } @@ -284,7 +283,7 @@ mod from_str { #[test] fn valid() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert_eq!( H64::from_str("0123456789ABCDEF").unwrap(), @@ -294,19 +293,19 @@ mod from_str { #[test] fn empty_str() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert!(H64::from_str("").is_err()) } #[test] fn invalid_digits() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert!(H64::from_str("Hello, World!").is_err()) } #[test] fn too_many_digits() { - use core_::str::FromStr; + use crate::core_::str::FromStr; assert!(H64::from_str("0123456789ABCDEF0").is_err()) } } From e9e3ab35b380aad5fd958e1cba4a033f5dca1dc4 Mon Sep 17 00:00:00 2001 From: Demi Obenour <48690212+DemiMarie-parity@users.noreply.github.com> Date: Wed, 16 Oct 2019 07:17:15 -0400 Subject: [PATCH 014/359] Update to 2018 edition idioms (#237) * Update to 2018 edition idioms * Reduce extern crate usage * fix missing import in tests * Add missing `use $crate::unroll` * Address review comments --- ethbloom/src/lib.rs | 12 +-- ethereum-types/src/lib.rs | 3 +- kvdb-rocksdb/src/lib.rs | 10 +-- kvdb-web/src/error.rs | 2 +- parity-bytes/src/lib.rs | 8 +- parity-crypto/benches/bench.rs | 3 - parity-crypto/src/error.rs | 12 +-- parity-crypto/src/hmac/mod.rs | 2 +- parity-crypto/src/pbkdf2/mod.rs | 4 +- rlp/tests/tests.rs | 4 +- transaction-pool/src/error.rs | 2 +- transaction-pool/src/pool.rs | 10 +-- transaction-pool/src/replace.rs | 2 +- transaction-pool/src/tests/helpers.rs | 2 +- transaction-pool/src/tests/mod.rs | 4 +- transaction-pool/src/transactions.rs | 2 +- uint/benches/bigint.rs | 10 +-- uint/examples/modular.rs | 3 +- uint/src/lib.rs | 10 +-- uint/src/uint.rs | 115 ++++++++++++++------------ uint/tests/uint_tests.rs | 26 +++--- 21 files changed, 119 
insertions(+), 127 deletions(-) diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index ec6514c18..682a0d247 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -140,13 +140,13 @@ impl Bloom { } pub fn contains_bloom<'a, B>(&self, bloom: B) -> bool where BloomRef<'a>: From { - let bloom_ref: BloomRef = bloom.into(); + let bloom_ref: BloomRef<'_> = bloom.into(); // workaround for https://github.com/rust-lang/rust/issues/43644 self.contains_bloom_ref(bloom_ref) } - fn contains_bloom_ref(&self, bloom: BloomRef) -> bool { - let self_ref: BloomRef = self.into(); + fn contains_bloom_ref(&self, bloom: BloomRef<'_>) -> bool { + let self_ref: BloomRef<'_> = self.into(); self_ref.contains_bloom(bloom) } @@ -158,7 +158,7 @@ impl Bloom { let mask = bloom_bits - 1; let bloom_bytes = (log2(bloom_bits) + 7) / 8; - let hash: Hash = input.into(); + let hash: Hash<'_> = input.into(); // must be a power of 2 assert_eq!(m & (m - 1), 0); @@ -183,7 +183,7 @@ impl Bloom { } pub fn accrue_bloom<'a, B>(&mut self, bloom: B) where BloomRef<'a>: From { - let bloom_ref: BloomRef = bloom.into(); + let bloom_ref: BloomRef<'_> = bloom.into(); assert_eq!(self.0.len(), BLOOM_SIZE); assert_eq!(bloom_ref.0.len(), BLOOM_SIZE); for i in 0..BLOOM_SIZE { @@ -213,7 +213,7 @@ impl<'a> BloomRef<'a> { #[allow(clippy::trivially_copy_pass_by_ref)] pub fn contains_bloom<'b, B>(&self, bloom: B) -> bool where BloomRef<'b>: From { - let bloom_ref: BloomRef = bloom.into(); + let bloom_ref: BloomRef<'_> = bloom.into(); assert_eq!(self.0.len(), BLOOM_SIZE); assert_eq!(bloom_ref.0.len(), BLOOM_SIZE); for i in 0..BLOOM_SIZE { diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index a6a828f1b..b491fd37b 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -1,7 +1,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -extern crate core; + mod hash; mod uint; diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 34388d47c..7f137953c 
100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -409,7 +409,7 @@ impl Database { } /// Commit buffered changes to database. Must be called under `flush_lock` - fn write_flushing_with_lock(&self, _lock: &mut MutexGuard) -> io::Result<()> { + fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<'_, bool>) -> io::Result<()> { match *self.db.read() { Some(DBAndColumns { ref db, ref cfs }) => { let batch = WriteBatch::new(); @@ -534,7 +534,7 @@ impl Database { } /// Get database iterator for flushed data. - pub fn iter(&self, col: Option) -> Option { + pub fn iter(&self, col: Option) -> Option> { match *self.db.read() { Some(DBAndColumns { ref db, ref cfs }) => { let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; @@ -561,7 +561,7 @@ impl Database { } } - fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Option { + fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Option> { match *self.db.read() { Some(DBAndColumns { ref db, ref cfs }) => { let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts), @@ -703,10 +703,8 @@ impl Drop for Database { #[cfg(test)] mod tests { - extern crate tempdir; - use std::str::FromStr; - use self::tempdir::TempDir; + use tempdir::TempDir; use ethereum_types::H256; use super::*; diff --git a/kvdb-web/src/error.rs b/kvdb-web/src/error.rs index 64361830d..f45295dc9 100644 --- a/kvdb-web/src/error.rs +++ b/kvdb-web/src/error.rs @@ -45,7 +45,7 @@ impl std::error::Error for Error { } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Error::WindowNotAvailable => write!(f, "Accessing a Window has failed"), Error::NotSupported(ref err) => write!( diff --git a/parity-bytes/src/lib.rs b/parity-bytes/src/lib.rs index 475864537..41bfd6134 100644 --- a/parity-bytes/src/lib.rs +++ b/parity-bytes/src/lib.rs @@ -32,7 +32,7 @@ use 
core::{cmp::min, fmt, ops}; pub struct PrettySlice<'a>(&'a [u8]); impl<'a> fmt::Debug for PrettySlice<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for i in 0..self.0.len() { if i > 0 { write!(f, "·{:02x}", self.0[i])?; @@ -45,7 +45,7 @@ impl<'a> fmt::Debug for PrettySlice<'a> { } impl<'a> fmt::Display for PrettySlice<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for i in 0..self.0.len() { write!(f, "{:02x}", self.0[i])?; } @@ -57,7 +57,7 @@ impl<'a> fmt::Display for PrettySlice<'a> { /// defaults cannot otherwise be avoided. pub trait ToPretty { /// Convert a type into a derivative form in order to make `format!` print it prettily. - fn pretty(&self) -> PrettySlice; + fn pretty(&self) -> PrettySlice<'_>; /// Express the object as a hex string. fn to_hex(&self) -> String { format!("{}", self.pretty()) @@ -65,7 +65,7 @@ pub trait ToPretty { } impl> ToPretty for T { - fn pretty(&self) -> PrettySlice { + fn pretty(&self) -> PrettySlice<'_> { PrettySlice(self.as_ref()) } } diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs index f9f68dfd1..bdc9fd16d 100644 --- a/parity-crypto/benches/bench.rs +++ b/parity-crypto/benches/bench.rs @@ -14,9 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
- -extern crate parity_crypto; - #[macro_use] extern crate criterion; diff --git a/parity-crypto/src/error.rs b/parity-crypto/src/error.rs index 888bebb21..110c62cc5 100644 --- a/parity-crypto/src/error.rs +++ b/parity-crypto/src/error.rs @@ -43,7 +43,7 @@ enum PrivSymmErr { } impl StdError for Error { - fn source(&self) -> Option<&(StdError + 'static)> { + fn source(&self) -> Option<&(dyn StdError + 'static)> { match self { Error::Scrypt(scrypt_err) => Some(scrypt_err), Error::Symm(symm_err) => Some(symm_err), @@ -52,7 +52,7 @@ impl StdError for Error { } impl StdError for ScryptError { - fn source(&self) -> Option<&(StdError + 'static)> { + fn source(&self) -> Option<&(dyn StdError + 'static)> { match self { ScryptError::ScryptParam(err) => Some(err), ScryptError::ScryptLength(err) => Some(err), @@ -62,7 +62,7 @@ impl StdError for ScryptError { } impl StdError for SymmError { - fn source(&self) -> Option<&(StdError + 'static)> { + fn source(&self) -> Option<&(dyn StdError + 'static)> { match &self.0 { PrivSymmErr::BlockMode(err) => Some(err), PrivSymmErr::InvalidKeyLength(err) => Some(err), @@ -72,7 +72,7 @@ impl StdError for SymmError { } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { match self { Error::Scrypt(err)=> write!(f, "scrypt error: {}", err), Error::Symm(err) => write!(f, "symm error: {}", err), @@ -81,7 +81,7 @@ impl fmt::Display for Error { } impl fmt::Display for ScryptError { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { match self { ScryptError::InvalidN => write!(f, "invalid n argument"), ScryptError::InvalidP => write!(f, "invalid p argument"), @@ -92,7 +92,7 @@ impl fmt::Display for ScryptError { } impl fmt::Display for SymmError { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), 
fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { match self { SymmError(PrivSymmErr::BlockMode(err)) => write!(f, "block cipher error: {}", err), SymmError(PrivSymmErr::KeyStream(err)) => write!(f, "ctr key stream ended: {}", err), diff --git a/parity-crypto/src/hmac/mod.rs b/parity-crypto/src/hmac/mod.rs index 43721fb0a..571df7b59 100644 --- a/parity-crypto/src/hmac/mod.rs +++ b/parity-crypto/src/hmac/mod.rs @@ -52,7 +52,7 @@ pub struct SigKey(KeyInner, PhantomData); struct DisposableBox(Box<[u8]>); impl std::fmt::Debug for DisposableBox { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", &self.0.as_ref()) } } diff --git a/parity-crypto/src/pbkdf2/mod.rs b/parity-crypto/src/pbkdf2/mod.rs index 83445e7e7..099e98893 100644 --- a/parity-crypto/src/pbkdf2/mod.rs +++ b/parity-crypto/src/pbkdf2/mod.rs @@ -17,11 +17,11 @@ pub struct Salt<'a>(pub &'a [u8]); pub struct Secret<'a>(pub &'a [u8]); -pub fn sha256(iter: u32, salt: Salt, sec: Secret, out: &mut [u8; 32]) { +pub fn sha256(iter: u32, salt: Salt<'_>, sec: Secret<'_>, out: &mut [u8; 32]) { pbkdf2::pbkdf2::>(sec.0, salt.0, iter as usize, out) } -pub fn sha512(iter: u32, salt: Salt, sec: Secret, out: &mut [u8; 64]) { +pub fn sha512(iter: u32, salt: Salt<'_>, sec: Secret<'_>, out: &mut [u8; 64]) { pbkdf2::pbkdf2::>(sec.0, salt.0, iter as usize, out) } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 07dd3fcc5..5d0310553 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -509,7 +509,7 @@ fn test_nested_list_roundtrip() { } impl Decodable for Inner { - fn decode(rlp: &Rlp) -> Result { + fn decode(rlp: &Rlp<'_>) -> Result { Ok(Inner(rlp.val_at(0)?, rlp.val_at(1)?)) } } @@ -526,7 +526,7 @@ fn test_nested_list_roundtrip() { } impl Decodable for Nest { - fn decode(rlp: &Rlp) -> Result { + fn decode(rlp: &Rlp<'_>) -> Result { Ok(Nest(rlp.list_at(0)?)) } } diff 
--git a/transaction-pool/src/error.rs b/transaction-pool/src/error.rs index 851f1f6e7..74ce76652 100644 --- a/transaction-pool/src/error.rs +++ b/transaction-pool/src/error.rs @@ -31,7 +31,7 @@ pub enum Error { pub type Result = result::Result>; impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Error::AlreadyImported(h) => write!(f, "[{:?}] already imported", h), diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs index efc12b23d..539e7dded 100644 --- a/transaction-pool/src/pool.rs +++ b/transaction-pool/src/pool.rs @@ -138,7 +138,7 @@ impl Pool where /// new transaction via the supplied `ShouldReplace` implementation and may be evicted. /// /// The `Listener` will be informed on any drops or rejections. - pub fn import(&mut self, transaction: T, replace: &ShouldReplace) -> error::Result, T::Hash> { + pub fn import(&mut self, transaction: T, replace: &dyn ShouldReplace) -> error::Result, T::Hash> { let mem_usage = transaction.mem_usage(); if self.by_hash.contains_key(transaction.hash()) { @@ -288,7 +288,7 @@ impl Pool where /// /// Returns `None` in case we couldn't decide if the transaction should replace the worst transaction or not. /// In such case we will accept the transaction even though it is going to exceed the limit. - fn remove_worst(&mut self, transaction: &Transaction, replace: &ShouldReplace) -> error::Result>, T::Hash> { + fn remove_worst(&mut self, transaction: &Transaction, replace: &dyn ShouldReplace) -> error::Result>, T::Hash> { let to_remove = match self.worst_transactions.iter().next_back() { // No elements to remove? and the pool is still full? None => { @@ -437,7 +437,7 @@ impl Pool where } /// Returns an iterator of pending (ready) transactions. 
- pub fn pending>(&self, ready: R) -> PendingIterator { + pub fn pending>(&self, ready: R) -> PendingIterator<'_, T, R, S, L> { PendingIterator { ready, best_transactions: self.best_transactions.clone(), @@ -446,7 +446,7 @@ impl Pool where } /// Returns pending (ready) transactions from given sender. - pub fn pending_from_sender>(&self, ready: R, sender: &T::Sender) -> PendingIterator { + pub fn pending_from_sender>(&self, ready: R, sender: &T::Sender) -> PendingIterator<'_, T, R, S, L> { let best_transactions = self.transactions.get(sender) .and_then(|transactions| transactions.worst_and_best()) .map(|(_, best)| ScoreWithRef::new(best.0, best.1)) @@ -465,7 +465,7 @@ impl Pool where } /// Returns unprioritized list of ready transactions. - pub fn unordered_pending>(&self, ready: R) -> UnorderedIterator { + pub fn unordered_pending>(&self, ready: R) -> UnorderedIterator<'_, T, R, S> { UnorderedIterator { ready, senders: self.transactions.iter(), diff --git a/transaction-pool/src/replace.rs b/transaction-pool/src/replace.rs index 7a8896995..bc44e8d90 100644 --- a/transaction-pool/src/replace.rs +++ b/transaction-pool/src/replace.rs @@ -51,5 +51,5 @@ pub trait ShouldReplace { /// Decides if `new` should push out `old` transaction from the pool. /// /// NOTE returning `InsertNew` here can lead to some transactions being accepted above pool limits. 
- fn should_replace(&self, old: &ReplaceTransaction, new: &ReplaceTransaction) -> Choice; + fn should_replace(&self, old: &ReplaceTransaction<'_, T>, new: &ReplaceTransaction<'_, T>) -> Choice; } diff --git a/transaction-pool/src/tests/helpers.rs b/transaction-pool/src/tests/helpers.rs index f14314c2e..73d11f9e5 100644 --- a/transaction-pool/src/tests/helpers.rs +++ b/transaction-pool/src/tests/helpers.rs @@ -74,7 +74,7 @@ impl Scoring for DummyScoring { } impl ShouldReplace for DummyScoring { - fn should_replace(&self, old: &ReplaceTransaction, new: &ReplaceTransaction) -> scoring::Choice { + fn should_replace(&self, old: &ReplaceTransaction<'_, Transaction>, new: &ReplaceTransaction<'_, Transaction>) -> scoring::Choice { if self.always_insert { scoring::Choice::InsertNew } else if new.gas_price > old.gas_price { diff --git a/transaction-pool/src/tests/mod.rs b/transaction-pool/src/tests/mod.rs index 3d1ca4af4..7cc7c5553 100644 --- a/transaction-pool/src/tests/mod.rs +++ b/transaction-pool/src/tests/mod.rs @@ -272,9 +272,9 @@ fn should_skip_staled_pending_transactions() { let b = TransactionBuilder::default(); let mut txq = TestPool::default(); - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); + let _tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); let tx2 = import(&mut txq, b.tx().nonce(2).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); + let _tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); // tx0 and tx1 are Stale, tx2 is Ready let mut pending = txq.pending(NonceReady::new(2)); diff --git a/transaction-pool/src/transactions.rs b/transaction-pool/src/transactions.rs index eb6151131..8256bf33a 100644 --- a/transaction-pool/src/transactions.rs +++ b/transaction-pool/src/transactions.rs @@ -70,7 +70,7 @@ impl> Transactions { self.transactions.len() } - pub fn iter(&self) -> ::std::slice::Iter> { + pub fn iter(&self) -> ::std::slice::Iter<'_, 
Transaction> { self.transactions.iter() } diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index e1d45adc9..61f24e93d 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -12,13 +12,9 @@ //! rustup run cargo bench //! ``` -#[macro_use] -extern crate criterion; -extern crate core; -#[macro_use] -extern crate uint; -extern crate num_bigint; -extern crate rug; + +use criterion::{criterion_group, criterion_main}; +use uint::{construct_uint, uint_full_mul_reg}; construct_uint! { pub struct U256(4); diff --git a/uint/examples/modular.rs b/uint/examples/modular.rs index 6cd9f7409..1364bd766 100644 --- a/uint/examples/modular.rs +++ b/uint/examples/modular.rs @@ -6,8 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[cfg(feature="std")] -extern crate core; + #[macro_use] extern crate uint; diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 352ccf70d..5f803a271 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -11,21 +11,21 @@ #![cfg_attr(not(feature = "std"), no_std)] #[doc(hidden)] -pub extern crate byteorder; +pub use byteorder; // Re-export libcore using an alias so that the macros can work without // requiring `extern crate core` downstream. #[doc(hidden)] -pub extern crate core as core_; +pub use core as core_; #[doc(hidden)] -pub extern crate rustc_hex; +pub use rustc_hex; #[cfg(feature="quickcheck")] #[doc(hidden)] -pub extern crate quickcheck; +pub use quickcheck; -extern crate crunchy; +#[doc(hidden)] pub use crunchy::unroll; #[macro_use] diff --git a/uint/src/uint.rs b/uint/src/uint.rs index cc58ee220..5006bbd38 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -81,6 +81,9 @@ macro_rules! uint_overflowing_binop { let ret_ptr = &mut ret as *mut [u64; $n_words] as *mut u64; let mut carry = 0u64; + // `unroll!` is recursive, but doesn’t use `$crate::unroll`, so we need to ensure that it + // is in scope unqualified. + use $crate::unroll; unroll! 
{ for i in 0..$n_words { use $crate::core_::ptr; @@ -119,10 +122,10 @@ macro_rules! uint_overflowing_binop { #[doc(hidden)] macro_rules! uint_full_mul_reg { ($name:ident, 8, $self_expr:expr, $other:expr) => { - uint_full_mul_reg!($name, 8, $self_expr, $other, |a, b| a != 0 || b != 0); + $crate::uint_full_mul_reg!($name, 8, $self_expr, $other, |a, b| a != 0 || b != 0); }; ($name:ident, $n_words:tt, $self_expr:expr, $other:expr) => { - uint_full_mul_reg!($name, $n_words, $self_expr, $other, |_, _| true); + $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other, |_, _| true); }; ($name:ident, $n_words:tt, $self_expr:expr, $other:expr, $check:expr) => ({{ #![allow(unused_assignments)] @@ -131,6 +134,7 @@ macro_rules! uint_full_mul_reg { let $name(ref you) = $other; let mut ret = [0u64; $n_words * 2]; + use $crate::unroll; unroll! { for i in 0..$n_words { let mut carry = 0u64; @@ -173,7 +177,7 @@ macro_rules! uint_full_mul_reg { #[doc(hidden)] macro_rules! uint_overflowing_mul { ($name:ident, $n_words: tt, $self_expr: expr, $other: expr) => ({ - let ret: [u64; $n_words * 2] = uint_full_mul_reg!($name, $n_words, $self_expr, $other); + let ret: [u64; $n_words * 2] = $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other); // The safety of this is enforced by the compiler let ret: [[u64; $n_words]; 2] = unsafe { $crate::core_::mem::transmute(ret) }; @@ -181,6 +185,7 @@ macro_rules! uint_overflowing_mul { // The compiler WILL NOT inline this if you remove this annotation. #[inline(always)] fn any_nonzero(arr: &[u64; $n_words]) -> bool { + use $crate::unroll; unroll! { for i in 0..$n_words { if arr[i] != 0 { @@ -234,7 +239,7 @@ macro_rules! impl_mul_from { fn mul(self, other: $other) -> $name { let bignum: $name = other.into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -245,7 +250,7 @@ macro_rules! 
impl_mul_from { fn mul(self, other: &'a $other) -> $name { let bignum: $name = (*other).into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -256,7 +261,7 @@ macro_rules! impl_mul_from { fn mul(self, other: &'a $other) -> $name { let bignum: $name = (*other).into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -267,7 +272,7 @@ macro_rules! impl_mul_from { fn mul(self, other: $other) -> $name { let bignum: $name = other.into(); let (result, overflow) = self.overflowing_mul(bignum); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -290,7 +295,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: $other) -> $name { let (result, carry) = self.overflowing_mul_u64(other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -300,7 +305,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: &'a $other) -> $name { let (result, carry) = self.overflowing_mul_u64(*other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -310,7 +315,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: &'a $other) -> $name { let (result, carry) = self.overflowing_mul_u64(*other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -320,7 +325,7 @@ macro_rules! impl_mul_for_primitive { fn mul(self, other: $other) -> $name { let (result, carry) = self.overflowing_mul_u64(other as u64); - panic_on_overflow!(carry > 0); + $crate::panic_on_overflow!(carry > 0); result } } @@ -337,11 +342,11 @@ macro_rules! impl_mul_for_primitive { #[macro_export] macro_rules! 
construct_uint { ( $(#[$attr:meta])* $visibility:vis struct $name:ident (1); ) => { - construct_uint!{ @construct $(#[$attr])* $visibility struct $name (1); } + $crate::construct_uint!{ @construct $(#[$attr])* $visibility struct $name (1); } }; ( $(#[$attr:meta])* $visibility:vis struct $name:ident ( $n_words:tt ); ) => { - construct_uint! { @construct $(#[$attr])* $visibility struct $name ($n_words); } + $crate::construct_uint! { @construct $(#[$attr])* $visibility struct $name ($n_words); } impl $crate::core_::convert::From for $name { fn from(value: u128) -> $name { @@ -863,22 +868,22 @@ macro_rules! construct_uint { while n > u_one { if is_even(&n) { - x = overflowing!(x.overflowing_mul(x), overflow); + x = $crate::overflowing!(x.overflowing_mul(x), overflow); n = n >> 1usize; } else { - y = overflowing!(x.overflowing_mul(y), overflow); - x = overflowing!(x.overflowing_mul(x), overflow); + y = $crate::overflowing!(x.overflowing_mul(y), overflow); + x = $crate::overflowing!(x.overflowing_mul(x), overflow); n = (n - u_one) >> 1usize; } } - let res = overflowing!(x.overflowing_mul(y), overflow); + let res = $crate::overflowing!(x.overflowing_mul(y), overflow); (res, overflow) } /// Add with overflow. #[inline(always)] pub fn overflowing_add(self, other: $name) -> ($name, bool) { - uint_overflowing_binop!( + $crate::uint_overflowing_binop!( $name, $n_words, self, @@ -906,7 +911,7 @@ macro_rules! construct_uint { /// Subtraction which underflows and returns a flag if it does. #[inline(always)] pub fn overflowing_sub(self, other: $name) -> ($name, bool) { - uint_overflowing_binop!( + $crate::uint_overflowing_binop!( $name, $n_words, self, @@ -934,7 +939,7 @@ macro_rules! construct_uint { /// Multiply with overflow, returning a flag if it does. 
#[inline(always)] pub fn overflowing_mul(self, other: $name) -> ($name, bool) { - uint_overflowing_mul!($name, $n_words, self, other) + $crate::uint_overflowing_mul!($name, $n_words, self, other) } /// Multiplication which saturates at the maximum value.. @@ -1156,10 +1161,10 @@ macro_rules! construct_uint { } } - impl_map_from!($name, u8, u64); - impl_map_from!($name, u16, u64); - impl_map_from!($name, u32, u64); - impl_map_from!($name, usize, u64); + $crate::impl_map_from!($name, u8, u64); + $crate::impl_map_from!($name, u16, u64); + $crate::impl_map_from!($name, u32, u64); + $crate::impl_map_from!($name, usize, u64); impl $crate::core_::convert::From for $name { fn from(value: i64) -> $name { @@ -1170,10 +1175,10 @@ macro_rules! construct_uint { } } - impl_map_from!($name, i8, i64); - impl_map_from!($name, i16, i64); - impl_map_from!($name, i32, i64); - impl_map_from!($name, isize, i64); + $crate::impl_map_from!($name, i8, i64); + $crate::impl_map_from!($name, i16, i64); + $crate::impl_map_from!($name, i32, i64); + $crate::impl_map_from!($name, isize, i64); // Converts from big endian representation. impl<'a> $crate::core_::convert::From<&'a [u8]> for $name { @@ -1182,23 +1187,23 @@ macro_rules! 
construct_uint { } } - impl_try_from_for_primitive!($name, u8); - impl_try_from_for_primitive!($name, u16); - impl_try_from_for_primitive!($name, u32); - impl_try_from_for_primitive!($name, usize); - impl_try_from_for_primitive!($name, u64); - impl_try_from_for_primitive!($name, i8); - impl_try_from_for_primitive!($name, i16); - impl_try_from_for_primitive!($name, i32); - impl_try_from_for_primitive!($name, isize); - impl_try_from_for_primitive!($name, i64); + $crate::impl_try_from_for_primitive!($name, u8); + $crate::impl_try_from_for_primitive!($name, u16); + $crate::impl_try_from_for_primitive!($name, u32); + $crate::impl_try_from_for_primitive!($name, usize); + $crate::impl_try_from_for_primitive!($name, u64); + $crate::impl_try_from_for_primitive!($name, i8); + $crate::impl_try_from_for_primitive!($name, i16); + $crate::impl_try_from_for_primitive!($name, i32); + $crate::impl_try_from_for_primitive!($name, isize); + $crate::impl_try_from_for_primitive!($name, i64); impl $crate::core_::ops::Add for $name where T: Into<$name> { type Output = $name; fn add(self, other: T) -> $name { let (result, overflow) = self.overflowing_add(other.into()); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -1214,7 +1219,7 @@ macro_rules! construct_uint { impl $crate::core_::ops::AddAssign<$name> for $name { fn add_assign(&mut self, other: $name) { let (result, overflow) = self.overflowing_add(other); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); *self = result } } @@ -1225,7 +1230,7 @@ macro_rules! construct_uint { #[inline] fn sub(self, other: T) -> $name { let (result, overflow) = self.overflowing_sub(other.into()); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); result } } @@ -1241,23 +1246,23 @@ macro_rules! 
construct_uint { impl $crate::core_::ops::SubAssign<$name> for $name { fn sub_assign(&mut self, other: $name) { let (result, overflow) = self.overflowing_sub(other); - panic_on_overflow!(overflow); + $crate::panic_on_overflow!(overflow); *self = result } } // all other impls - impl_mul_from!($name, $name); - impl_mul_for_primitive!($name, u8); - impl_mul_for_primitive!($name, u16); - impl_mul_for_primitive!($name, u32); - impl_mul_for_primitive!($name, u64); - impl_mul_for_primitive!($name, usize); - impl_mul_for_primitive!($name, i8); - impl_mul_for_primitive!($name, i16); - impl_mul_for_primitive!($name, i32); - impl_mul_for_primitive!($name, i64); - impl_mul_for_primitive!($name, isize); + $crate::impl_mul_from!($name, $name); + $crate::impl_mul_for_primitive!($name, u8); + $crate::impl_mul_for_primitive!($name, u16); + $crate::impl_mul_for_primitive!($name, u32); + $crate::impl_mul_for_primitive!($name, u64); + $crate::impl_mul_for_primitive!($name, usize); + $crate::impl_mul_for_primitive!($name, i8); + $crate::impl_mul_for_primitive!($name, i16); + $crate::impl_mul_for_primitive!($name, i32); + $crate::impl_mul_for_primitive!($name, i64); + $crate::impl_mul_for_primitive!($name, isize); impl $crate::core_::ops::Div for $name where T: Into<$name> { type Output = $name; @@ -1526,10 +1531,10 @@ macro_rules! 
construct_uint { } } - impl_std_for_uint!($name, $n_words); + $crate::impl_std_for_uint!($name, $n_words); // `$n_words * 8` because macro expects bytes and // uints use 64 bit (8 byte) words - impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); + $crate::impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); } } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index de77b828e..7f36b6a65 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1,19 +1,16 @@ -extern crate core; - -#[macro_use] -extern crate uint; - -#[cfg(feature = "quickcheck")] -#[macro_use] -extern crate quickcheck; - -#[cfg_attr(all(test, feature = "quickcheck"), macro_use(unroll))] -extern crate crunchy; +// Copyright 2015-2019 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use core::u64::MAX; use core::str::FromStr; use core::convert::TryInto; -use uint::{FromDecStrErr}; +use uint::{FromDecStrErr, construct_uint, overflowing}; +use crunchy::unroll; construct_uint! { pub struct U256(4); @@ -1101,11 +1098,12 @@ fn trailing_zeros() { #[cfg(feature="quickcheck")] pub mod laws { + use super::construct_uint; macro_rules! uint_laws { ($mod_name:ident, $uint_ty:ident) => { mod $mod_name { - use quickcheck::TestResult; - use super::{$uint_ty}; + use quickcheck::{TestResult, quickcheck}; + use super::$uint_ty; quickcheck! { fn associative_add(x: $uint_ty, y: $uint_ty, z: $uint_ty) -> TestResult { From 027649b1fa3ce9e675f199faad96aae5f36f96e8 Mon Sep 17 00:00:00 2001 From: Federico Pasqua Date: Fri, 18 Oct 2019 10:36:39 +0200 Subject: [PATCH 015/359] Fix old syntax in the README of crate uint (#240) * Fix old syntax in the README of crate uint The syntax actually reported is not working and is different from that present in the test and the examples folders. 
* Update uint README.md with the instruction for pre and 2018-edition --- uint/README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/uint/README.md b/uint/README.md index e10638919..6d4a30b70 100644 --- a/uint/README.md +++ b/uint/README.md @@ -16,11 +16,26 @@ In your `Cargo.toml` paste uint = "0.8" ``` +Import the macro + +``` +use uint::construct_uint; +``` + +If you're using pre-edition Rust in your main file + +``` +#[macro_use] +extern crate uint; +``` + Construct your own big unsigned integer type as follows. ``` // U1024 with 1024 bits consisting of 16 x 64-bit words -construct_uint!(U1024; 16); +construct_uint! { + pub struct U1024(16); +} ``` ## Tests From 2b39ab937053e608beb914da2a69c7e38ab77748 Mon Sep 17 00:00:00 2001 From: Demi Obenour <48690212+DemiMarie-parity@users.noreply.github.com> Date: Sat, 19 Oct 2019 18:40:43 -0400 Subject: [PATCH 016/359] Replace `uninitialized` with `MaybeUninit` (#238) * Replace `uninitialized` with `MaybeUninit` `mem::uninitialized` is deprecated and unsafe. This replaces its use with `mem::MaybeUninit` and adds a proof that the use of `mem::MaybeUninit` is correct. Requested by @niklasad1. * Replace mem::uninitialized() with zero initialization This generates the same assembly and is safer. * Improve formatting and force CI retry --- ethbloom/src/lib.rs | 32 ++++++++++++++++---------------- fixed-hash/Cargo.toml | 2 +- fixed-hash/src/hash.rs | 1 - keccak-hash/src/lib.rs | 2 +- parity-crypto/benches/bench.rs | 2 +- parity-crypto/src/aes.rs | 4 ++-- uint/Cargo.toml | 1 + uint/src/lib.rs | 2 ++ uint/src/uint.rs | 18 ++++++++---------- 9 files changed, 32 insertions(+), 32 deletions(-) diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 682a0d247..5cc5b8119 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -8,22 +8,22 @@ //! use std::str::FromStr; //! let bloom = Bloom::from_str( //! "00000000000000000000000000000000\ -//! 00000000100000000000000000000000\ -//! 
00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000002020000000000000000000000\ -//! 00000000000000000000000800000000\ -//! 10000000000000000000000000000000\ -//! 00000000000000000000001000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000" -//! ).unwrap(); +//! 00000000100000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000002020000000000000000000000\ +//! 00000000000000000000000800000000\ +//! 10000000000000000000000000000000\ +//! 00000000000000000000001000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000" +//! ).unwrap(); //! let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); //! let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); //! 
diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 8672c6d31..e494fedd2 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -18,7 +18,7 @@ rand = { version = "0.7", optional = true, default-features = false } rustc-hex = { version = "2.0", optional = true, default-features = false } quickcheck = { version = "0.9", optional = true } byteorder = { version = "1.2", optional = true, default-features = false } -static_assertions = "0.3" +static_assertions = "1.0.0" [dev-dependencies] rand_xorshift = "0.2.0" diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index f8ffcb426..393b4cc93 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -779,7 +779,6 @@ macro_rules! impl_ops_for_hash { macro_rules! impl_fixed_hash_conversions { ($large_ty:ident, $small_ty:ident) => { $crate::static_assertions::const_assert!( - VALID_SIZES; $crate::core_::mem::size_of::<$small_ty>() < $crate::core_::mem::size_of::<$large_ty>() ); diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index 1b8926be1..77a3767e0 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -107,7 +107,7 @@ mod tests { keccak([0x41u8; 32]), H256([ 0x59, 0xca, 0xd5, 0x94, 0x86, 0x73, 0x62, 0x2c, - 0x1d, 0x64, 0xe2, 0x32, 0x24, 0x88, 0xbf, 0x01, + 0x1d, 0x64, 0xe2, 0x32, 0x24, 0x88, 0xbf, 0x01, 0x61, 0x9f, 0x7f, 0xf4, 0x57, 0x89, 0x74, 0x1b, 0x15, 0xa9, 0xf7, 0x82, 0xce, 0x92, 0x90, 0xa8 ]), diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs index bdc9fd16d..6e739adb6 100644 --- a/parity-crypto/benches/bench.rs +++ b/parity-crypto/benches/bench.rs @@ -40,7 +40,7 @@ fn input_len(c: &mut Criterion) { let mut dest = vec![0; *size]; let k = [0; 16]; let iv = [0; 16]; - + b.iter(||{ parity_crypto::aes::encrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); // same as encrypt but add it just in case diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs index dc62674d4..e13300524 100644 --- 
a/parity-crypto/src/aes.rs +++ b/parity-crypto/src/aes.rs @@ -66,14 +66,14 @@ impl AesCtr256 { )) } - /// In place encrypt a content without padding, the content length must be a multiple + /// In place encrypt a content without padding, the content length must be a multiple /// of the block size. pub fn encrypt(&mut self, content: &mut[u8]) -> Result<(), SymmError> { self.0.try_apply_keystream(content)?; Ok(()) } - /// In place decrypt a content without padding, the content length must be a multiple + /// In place decrypt a content without padding, the content length must be a multiple /// of the block size. pub fn decrypt(&mut self, content: &mut[u8]) -> Result<(), SymmError> { self.0.try_apply_keystream(content)?; diff --git a/uint/Cargo.toml b/uint/Cargo.toml index e35d809b6..cf6a88d03 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -14,6 +14,7 @@ byteorder = { version = "1", default-features = false } rustc-hex = { version = "2.0", default-features = false } quickcheck = { version = "0.6", optional = true } crunchy = { version = "0.2", default-features = true } +static_assertions = "1.0.0" [features] default = ["std"] diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 5f803a271..1ef540d85 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -26,6 +26,8 @@ pub use rustc_hex; pub use quickcheck; #[doc(hidden)] +pub use static_assertions; + pub use crunchy::unroll; #[macro_use] diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 5006bbd38..49dfcf07f 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -74,39 +74,37 @@ macro_rules! impl_try_from_for_primitive { #[doc(hidden)] macro_rules! 
uint_overflowing_binop { ($name:ident, $n_words: tt, $self_expr: expr, $other: expr, $fn:expr) => ({ + use $crate::{core_ as core}; let $name(ref me) = $self_expr; let $name(ref you) = $other; - let mut ret = unsafe { $crate::core_::mem::uninitialized() }; + let mut ret = [0u64; $n_words]; let ret_ptr = &mut ret as *mut [u64; $n_words] as *mut u64; let mut carry = 0u64; + $crate::static_assertions::const_assert!(core::isize::MAX as usize / core::mem::size_of::() > $n_words); // `unroll!` is recursive, but doesn’t use `$crate::unroll`, so we need to ensure that it // is in scope unqualified. use $crate::unroll; unroll! { for i in 0..$n_words { - use $crate::core_::ptr; + use core::ptr; if carry != 0 { let (res1, overflow1) = ($fn)(me[i], you[i]); let (res2, overflow2) = ($fn)(res1, carry); unsafe { - ptr::write( - ret_ptr.offset(i as _), - res2 - ); + // SAFETY: `i` is within bounds and `i * size_of::() < isize::MAX` + *ret_ptr.offset(i as _) = res2 } carry = (overflow1 as u8 + overflow2 as u8) as u64; } else { let (res, overflow) = ($fn)(me[i], you[i]); unsafe { - ptr::write( - ret_ptr.offset(i as _), - res - ); + // SAFETY: `i` is within bounds and `i * size_of::() < isize::MAX` + *ret_ptr.offset(i as _) = res } carry = overflow as u64; From 31c67cf73b1213c4b5e31e8bf85212a125d3bb51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 21 Oct 2019 14:23:09 +0200 Subject: [PATCH 017/359] Fix off-by-one. (#243) * Fix off-by-one. * Fix. 
--- primitive-types/impls/serde/Cargo.toml | 5 ++- primitive-types/impls/serde/src/serialize.rs | 41 +++++++++++++++++++- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 77a70be6c..bd2963fc4 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.2.1" +version = "0.2.2" authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" @@ -11,8 +11,9 @@ serde = "1.0" [dev-dependencies] criterion = "0.3.0" -uint = "0.8.1" +serde_derive = "1.0" serde_json = "1.0.40" +uint = "0.8.1" [[bench]] name = "impl_serde" diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index da4fe491f..95b626471 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -105,7 +105,7 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where let bytes_len = v.len() - 2; let mut modulus = bytes_len % 2; - let mut bytes = vec![0u8; bytes_len / 2]; + let mut bytes = vec![0u8; (bytes_len + 1) / 2]; let mut buf = 0; let mut pos = 0; for (idx, byte) in v.bytes().enumerate().skip(2) { @@ -217,3 +217,42 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) deserializer.deserialize_str(Visitor { len }) } + +#[cfg(test)] +mod tests { + extern crate serde_derive; + + use self::serde_derive::Deserialize; + + #[derive(Deserialize)] + struct Bytes(#[serde(with="super")] Vec); + + #[test] + fn should_not_fail_on_short_string() { + let a: Bytes = serde_json::from_str("\"0x\"").unwrap(); + let b: Bytes = serde_json::from_str("\"0x1\"").unwrap(); + let c: Bytes = serde_json::from_str("\"0x12\"").unwrap(); + let d: Bytes = serde_json::from_str("\"0x123\"").unwrap(); + let e: Bytes = 
serde_json::from_str("\"0x1234\"").unwrap(); + let f: Bytes = serde_json::from_str("\"0x12345\"").unwrap(); + + assert!(a.0.is_empty()); + assert_eq!(b.0, vec![1]); + assert_eq!(c.0, vec![0x12]); + assert_eq!(d.0, vec![0x1, 0x23]); + assert_eq!(e.0, vec![0x12, 0x34]); + assert_eq!(f.0, vec![0x1, 0x23, 0x45]); + } + + + #[test] + fn should_not_fail_on_other_strings() { + let a: Bytes = serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); + let b: Bytes = serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"").unwrap(); + let c: Bytes = serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b4\"").unwrap(); + + assert_eq!(a.0.len(), 31); + assert_eq!(b.0.len(), 32); + assert_eq!(c.0.len(), 32); + } +} From 0de00989f03266693c5b3b0dae1aceba8854cf52 Mon Sep 17 00:00:00 2001 From: Anton Gavrilov Date: Tue, 22 Oct 2019 09:50:54 +0200 Subject: [PATCH 018/359] Move ethkey crypto utils to parity crypto crate (#210) * EthKey modules moved to crypto crate * 2018 edition changes * 2 converters added * Remove quick error * Public key primitives moved to the separate module * Feature include for public key module * Rust hex version increased, module docs added * Move ethereum types include under public key feature * More specific names for files * Errors usages corrected * Documentation for module components improved * Test and benchmark added * Static initialization for curve order method * Couple of comments changed * Generation point bytes glued into one array * Tests for math operations added * Proper feauture for benches * Comments added to methods and parameters added * Switch to try_from and Infallible traits * Comment removed * Ethereum types version increased --- parity-crypto/Cargo.toml | 15 +- parity-crypto/benches/bench.rs | 15 +- parity-crypto/src/lib.rs | 2 + parity-crypto/src/publickey/ec_math_utils.rs | 174 ++++++ 
parity-crypto/src/publickey/ecdh.rs | 37 ++ .../src/publickey/ecdsa_signature.rs | 326 +++++++++++ parity-crypto/src/publickey/ecies.rs | 138 +++++ parity-crypto/src/publickey/error.rs | 99 ++++ parity-crypto/src/publickey/extended_keys.rs | 521 ++++++++++++++++++ parity-crypto/src/publickey/keypair.rs | 122 ++++ .../src/publickey/keypair_generator.rs | 47 ++ parity-crypto/src/publickey/mod.rs | 55 ++ parity-crypto/src/publickey/secret_key.rs | 307 +++++++++++ 13 files changed, 1855 insertions(+), 3 deletions(-) create mode 100644 parity-crypto/src/publickey/ec_math_utils.rs create mode 100644 parity-crypto/src/publickey/ecdh.rs create mode 100644 parity-crypto/src/publickey/ecdsa_signature.rs create mode 100644 parity-crypto/src/publickey/ecies.rs create mode 100644 parity-crypto/src/publickey/error.rs create mode 100644 parity-crypto/src/publickey/extended_keys.rs create mode 100644 parity-crypto/src/publickey/keypair.rs create mode 100644 parity-crypto/src/publickey/keypair_generator.rs create mode 100644 parity-crypto/src/publickey/mod.rs create mode 100644 parity-crypto/src/publickey/secret_key.rs diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 5bfda0aca..9b4ecc7e1 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.4.1" +version = "0.4.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." 
@@ -11,10 +11,13 @@ edition = "2018" [[bench]] name = "bench" harness = false - +required-features = ["publickey"] [dependencies] tiny-keccak = "1.4" +eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1", rev = "a96ad75", optional = true } +ethereum-types = { version = "0.8.0", optional = true } +lazy_static = { version = "1.0", optional = true } scrypt = { version = "0.2", default-features = false } ripemd160 = "0.8.0" sha2 = "0.8.0" @@ -24,9 +27,17 @@ aes = "0.3.2" aes-ctr = "0.3.0" block-modes = "0.3.3" pbkdf2 = "0.3.0" +rand = "0.6" +rustc-hex = "2.0" subtle = "2.1" zeroize = "0.9.1" [dev-dependencies] criterion = "0.2" hex-literal = "0.2" + +[features] +default = [] +# public key crypto utils +# moved from ethkey module in parity ethereum repository +publickey = ["eth-secp256k1", "lazy_static", "ethereum-types"] \ No newline at end of file diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs index 6e739adb6..aa2e0fb78 100644 --- a/parity-crypto/benches/bench.rs +++ b/parity-crypto/benches/bench.rs @@ -18,8 +18,13 @@ extern crate criterion; use criterion::{Criterion, Bencher}; +use crate::parity_crypto::publickey::Generator; -criterion_group!(benches, input_len); +criterion_group!( + benches, + input_len, + ecdh_agree, +); criterion_main!(benches); @@ -51,3 +56,11 @@ fn input_len(c: &mut Criterion) { ); } + +fn ecdh_agree(c: &mut Criterion) { + let keypair = parity_crypto::publickey::Random.generate().unwrap(); + let public = keypair.public().clone(); + let secret = keypair.secret().clone(); + + c.bench_function("ecdh_agree", move |b| b.iter(|| parity_crypto::publickey::ecdh::agree(&secret, &public))); +} \ No newline at end of file diff --git a/parity-crypto/src/lib.rs b/parity-crypto/src/lib.rs index ce680f929..309c4c803 100644 --- a/parity-crypto/src/lib.rs +++ b/parity-crypto/src/lib.rs @@ -22,6 +22,8 @@ pub mod scrypt; pub mod digest; pub mod hmac; pub mod pbkdf2; +#[cfg(feature = "publickey")] +pub mod publickey; 
pub use crate::error::Error; diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs new file mode 100644 index 000000000..cbc2e3f81 --- /dev/null +++ b/parity-crypto/src/publickey/ec_math_utils.rs @@ -0,0 +1,174 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Multiple primitives for work with public and secret keys and with secp256k1 curve points + +use super::{SECP256K1, Public, Secret, Error}; +use secp256k1::key; +use secp256k1::constants::{CURVE_ORDER as SECP256K1_CURVE_ORDER}; +use ethereum_types::{BigEndianHash as _, U256, H256}; +use lazy_static::lazy_static; + +/// Generation point array combined from X and Y coordinates +/// Equivalent to uncompressed form, see https://tools.ietf.org/id/draft-jivsov-ecc-compact-05.html#rfc.section.3 +pub const BASE_POINT_BYTES: [u8; 65] = [ + 0x4, + // The X coordinate of the generator + 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, + 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, + 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, + 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, + // The Y coordinate of the generator + 0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, + 0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8, + 0xfd, 0x17, 0xb4, 0x48, 0xa6, 0x85, 0x54, 0x19, + 0x9c, 0x47, 0xd0, 0x8f, 
0xfb, 0x10, 0xd4, 0xb8, +]; + +lazy_static! { + pub static ref CURVE_ORDER: U256 = H256::from_slice(&SECP256K1_CURVE_ORDER).into_uint(); +} + +/// Whether the public key is valid. +pub fn public_is_valid(public: &Public) -> bool { + to_secp256k1_public(public).ok() + .map_or(false, |p| p.is_valid()) +} + +/// In-place multiply public key by secret key (EC point * scalar) +pub fn public_mul_secret(public: &mut Public, secret: &Secret) -> Result<(), Error> { + let key_secret = secret.to_secp256k1_secret()?; + let mut key_public = to_secp256k1_public(public)?; + key_public.mul_assign(&SECP256K1, &key_secret)?; + set_public(public, &key_public); + Ok(()) +} + +/// In-place add one public key to another (EC point + EC point) +pub fn public_add(public: &mut Public, other: &Public) -> Result<(), Error> { + let mut key_public = to_secp256k1_public(public)?; + let other_public = to_secp256k1_public(other)?; + key_public.add_assign(&SECP256K1, &other_public)?; + set_public(public, &key_public); + Ok(()) +} + +/// In-place sub one public key from another (EC point - EC point) +pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> { + let mut key_neg_other = to_secp256k1_public(other)?; + key_neg_other.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + + let mut key_public = to_secp256k1_public(public)?; + key_public.add_assign(&SECP256K1, &key_neg_other)?; + set_public(public, &key_public); + Ok(()) +} + +/// Replace a public key with its additive inverse (EC point = - EC point) +pub fn public_negate(public: &mut Public) -> Result<(), Error> { + let mut key_public = to_secp256k1_public(public)?; + key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + set_public(public, &key_public); + Ok(()) +} + +/// Return the generation point (aka base point) of secp256k1 +pub fn generation_point() -> Public { + let public_key = key::PublicKey::from_slice(&SECP256K1, &BASE_POINT_BYTES) + .expect("constructed using constants; qed"); + let mut public = 
Public::default(); + set_public(&mut public, &public_key); + public +} + +fn to_secp256k1_public(public: &Public) -> Result { + let public_data = { + let mut temp = [4u8; 65]; + (&mut temp[1..65]).copy_from_slice(&public[0..64]); + temp + }; + + Ok(key::PublicKey::from_slice(&SECP256K1, &public_data)?) +} + +fn set_public(public: &mut Public, key_public: &key::PublicKey) { + let key_public_serialized = key_public.serialize_vec(&SECP256K1, false); + public.as_bytes_mut().copy_from_slice(&key_public_serialized[1..65]); +} + +#[cfg(test)] +mod tests { + use super::super::{Random, Generator, Secret}; + use super::{public_add, public_sub, public_negate, public_is_valid, generation_point, public_mul_secret}; + use std::str::FromStr; + + #[test] + fn public_addition_is_commutative() { + let public1 = Random.generate().unwrap().public().clone(); + let public2 = Random.generate().unwrap().public().clone(); + + let mut left = public1.clone(); + public_add(&mut left, &public2).unwrap(); + + let mut right = public2.clone(); + public_add(&mut right, &public1).unwrap(); + + assert_eq!(left, right); + } + + #[test] + fn public_addition_is_reversible_with_subtraction() { + let public1 = Random.generate().unwrap().public().clone(); + let public2 = Random.generate().unwrap().public().clone(); + + let mut sum = public1.clone(); + public_add(&mut sum, &public2).unwrap(); + public_sub(&mut sum, &public2).unwrap(); + + assert_eq!(sum, public1); + } + + #[test] + fn public_negation_is_involutory() { + let public = Random.generate().unwrap().public().clone(); + let mut negation = public.clone(); + public_negate(&mut negation).unwrap(); + public_negate(&mut negation).unwrap(); + + assert_eq!(negation, public); + } + + #[test] + fn known_public_is_valid() { + let public = Random.generate().unwrap().public().clone(); + assert!(public_is_valid(&public)); + } + + #[test] + fn generation_point_expected() { + let point = generation_point(); + // Check the returned value equal to uncompressed 
form for secp256k1 + assert_eq!(format!("{:x}", point), "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"); + } + + #[test] + fn public_multiplication_verification() { + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let mut public = generation_point(); + public_mul_secret(&mut public, &secret).unwrap(); + assert_eq!(format!("{:x}", public), "8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4"); + } +} diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs new file mode 100644 index 000000000..73d25491c --- /dev/null +++ b/parity-crypto/src/publickey/ecdh.rs @@ -0,0 +1,37 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! ECDH key agreement scheme implemented as a free function. 
+ +use secp256k1::{self, ecdh, key}; +use super::{Error, Secret, Public, SECP256K1}; + +/// Agree on a shared secret +pub fn agree(secret: &Secret, public: &Public) -> Result { + let context = &SECP256K1; + let pdata = { + let mut temp = [4u8; 65]; + (&mut temp[1..65]).copy_from_slice(&public[0..64]); + temp + }; + + let publ = key::PublicKey::from_slice(context, &pdata)?; + let sec = key::SecretKey::from_slice(context, secret.as_bytes())?; + let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec); + + Secret::import_key(&shared[0..32]) + .map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) +} diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs new file mode 100644 index 000000000..421fa9b61 --- /dev/null +++ b/parity-crypto/src/publickey/ecdsa_signature.rs @@ -0,0 +1,326 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! 
Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm + +use std::ops::{Deref, DerefMut}; +use std::cmp::PartialEq; +use std::fmt; +use std::str::FromStr; +use std::hash::{Hash, Hasher}; +use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError}; +use secp256k1::key::{SecretKey, PublicKey}; +use rustc_hex::{ToHex, FromHex}; +use ethereum_types::{H520, H256}; +use super::{Secret, Public, SECP256K1, Message, public_to_address, Address, Error}; + +/// Signature encoded as RSV components +#[repr(C)] +pub struct Signature([u8; 65]); + +impl Signature { + /// Get a slice into the 'r' portion of the data. + pub fn r(&self) -> &[u8] { + &self.0[0..32] + } + + /// Get a slice into the 's' portion of the data. + pub fn s(&self) -> &[u8] { + &self.0[32..64] + } + + /// Get the recovery byte. + pub fn v(&self) -> u8 { + self.0[64] + } + + /// Encode the signature into RSV array (V altered to be in "Electrum" notation). + pub fn into_electrum(mut self) -> [u8; 65] { + self.0[64] += 27; + self.0 + } + + /// Parse bytes as a signature encoded as RSV (V in "Electrum" notation). + /// May return empty (invalid) signature if given data has invalid length. + pub fn from_electrum(data: &[u8]) -> Self { + if data.len() != 65 || data[64] < 27 { + // fallback to empty (invalid) signature + return Signature::default(); + } + + let mut sig = [0u8; 65]; + sig.copy_from_slice(data); + sig[64] -= 27; + Signature(sig) + } + + /// Create a signature object from the RSV triple. + pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self { + let mut sig = [0u8; 65]; + sig[0..32].copy_from_slice(r.as_ref()); + sig[32..64].copy_from_slice(s.as_ref()); + sig[64] = v; + Signature(sig) + } + + /// Check if this is a "low" signature (that s part of the signature is in range + /// 0x1 and 0x7FFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF 5D576E73 57A4501D DFE92F46 681B20A0 (inclusive)). 
+ /// This condition may be required by some verification algorithms + pub fn is_low_s(&self) -> bool { + const LOW_SIG_THRESHOLD: H256 = H256([ + 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x5D, 0x57, 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, + 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0, + ]); + H256::from_slice(self.s()) <= LOW_SIG_THRESHOLD + } + + /// Check if each component of the signature is in valid range. + /// r is in range 0x1 and 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 (inclusive) + /// s is in range 0x1 and fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 (inclusive) + /// v is 0 or 1 + /// Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1; + /// used here as the upper bound for a valid (r, s, v) tuple + pub fn is_valid(&self) -> bool { + const UPPER_BOUND: H256 = H256([ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, + 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41, + ]); + const ONE: H256 = H256([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + ]); + let r = H256::from_slice(self.r()); + let s = H256::from_slice(self.s()); + self.v() <= 1 && + r < UPPER_BOUND && r >= ONE && + s < UPPER_BOUND && s >= ONE + } +} + +// manual implementation large arrays don't have trait impls by default. +// TODO[grbIzl] remove when integer generics exist +impl PartialEq for Signature { + fn eq(&self, other: &Self) -> bool { + &self.0[..] == &other.0[..] + } +} + +// manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`. +impl Eq for Signature { } + +// also manual for the same reason, but the pretty printing might be useful. 
+impl fmt::Debug for Signature { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("Signature") + .field("r", &self.0[0..32].to_hex::()) + .field("s", &self.0[32..64].to_hex::()) + .field("v", &self.0[64..65].to_hex::()) + .finish() + } +} + +impl fmt::Display for Signature { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", self.to_hex::()) + } +} + +impl FromStr for Signature { + type Err = Error; + + fn from_str(s: &str) -> Result { + match s.from_hex::>() { + Ok(ref hex) if hex.len() == 65 => { + let mut data = [0; 65]; + data.copy_from_slice(&hex[0..65]); + Ok(Signature(data)) + }, + _ => Err(Error::InvalidSignature) + } + } +} + +impl Default for Signature { + fn default() -> Self { + Signature([0; 65]) + } +} + +impl Hash for Signature { + fn hash(&self, state: &mut H) { + H520::from(self.0).hash(state); + } +} + +impl Clone for Signature { + fn clone(&self) -> Self { + Signature(self.0.clone()) + } +} + +impl From<[u8; 65]> for Signature { + fn from(s: [u8; 65]) -> Self { + Signature(s) + } +} + +impl Into<[u8; 65]> for Signature { + fn into(self) -> [u8; 65] { + self.0 + } +} + +impl From for H520 { + fn from(s: Signature) -> Self { + H520::from(s.0) + } +} + +impl From for Signature { + fn from(bytes: H520) -> Self { + Signature(bytes.into()) + } +} + +impl Deref for Signature { + type Target = [u8; 65]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Signature { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +/// Signs message with the given secret key. 
+/// Returns the corresponding signature +pub fn sign(secret: &Secret, message: &Message) -> Result { + let context = &SECP256K1; + let sec = SecretKey::from_slice(context, secret.as_ref())?; + let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec)?; + let (rec_id, data) = s.serialize_compact(context); + let mut data_arr = [0; 65]; + + // no need to check if s is low, it always is + data_arr[0..64].copy_from_slice(&data[0..64]); + data_arr[64] = rec_id.to_i32() as u8; + Ok(Signature(data_arr)) +} + +/// Performs verification of the signature for the given message with corresponding public key +pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result { + let context = &SECP256K1; + let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let sig = rsig.to_standard(context); + + let pdata: [u8; 65] = { + let mut temp = [4u8; 65]; + temp[1..65].copy_from_slice(public.as_bytes()); + temp + }; + + let publ = PublicKey::from_slice(context, &pdata)?; + match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) { + Ok(_) => Ok(true), + Err(SecpError::IncorrectSignature) => Ok(false), + Err(x) => Err(Error::from(x)) + } +} + +/// Checks if the address corresponds to the public key from the signature for the message +pub fn verify_address(address: &Address, signature: &Signature, message: &Message) -> Result { + let public = recover(signature, message)?; + let recovered_address = public_to_address(&public); + Ok(address == &recovered_address) +} + +/// Recovers the public key from the signature for the message +pub fn recover(signature: &Signature, message: &Message) -> Result { + let context = &SECP256K1; + let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; + let serialized = 
pubkey.serialize_vec(context, false); + + let mut public = Public::default(); + public.as_bytes_mut().copy_from_slice(&serialized[1..65]); + Ok(public) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + use super::super::{Generator, Random, Message}; + use super::{sign, verify_public, verify_address, recover, Signature}; + + #[test] + fn vrs_conversion() { + // given + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + + // when + let vrs = signature.clone().into_electrum(); + let from_vrs = Signature::from_electrum(&vrs); + + // then + assert_eq!(signature, from_vrs); + } + + #[test] + fn signature_to_and_from_str() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + let string = format!("{}", signature); + let deserialized = Signature::from_str(&string).unwrap(); + assert_eq!(signature, deserialized); + } + + #[test] + fn sign_and_recover_public() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + assert_eq!(keypair.public(), &recover(&signature, &message).unwrap()); + } + + #[test] + fn sign_and_verify_public() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + assert!(verify_public(keypair.public(), &signature, &message).unwrap()); + } + + #[test] + fn sign_and_verify_address() { + let keypair = Random.generate().unwrap(); + let message = Message::default(); + let signature = sign(keypair.secret(), &message).unwrap(); + assert!(verify_address(&keypair.address(), &signature, &message).unwrap()); + } +} diff --git a/parity-crypto/src/publickey/ecies.rs b/parity-crypto/src/publickey/ecies.rs new file mode 100644 index 000000000..b54ce717e --- /dev/null +++ 
b/parity-crypto/src/publickey/ecies.rs @@ -0,0 +1,138 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Functions for ECIES scheme encryption and decryption + +use ethereum_types::H128; +use super::{Error, Random, Generator, Public, Secret, ecdh}; +use crate::{aes, digest, hmac, is_equal}; + +const ENC_VERSION: u8 = 0x04; + +/// Encrypt a message with a public key, writing an HMAC covering both +/// the plaintext and authenticated data. +/// +/// Authenticated data may be empty. 
+pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result, Error> { + let r = Random.generate()?; + let z = ecdh::agree(r.secret(), public)?; + let mut key = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); + + let ekey = &key[0..16]; + let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); + + let mut msg = vec![0u8; 1 + 64 + 16 + plain.len() + 32]; + msg[0] = ENC_VERSION; + { + let result_msg = &mut msg[1..]; + result_msg[0..64].copy_from_slice(r.public().as_bytes()); + let iv = H128::random(); + result_msg[64..80].copy_from_slice(iv.as_bytes()); + { + let cipher = &mut result_msg[(64 + 16)..(64 + 16 + plain.len())]; + aes::encrypt_128_ctr(ekey, iv.as_bytes(), plain, cipher)?; + } + let mut hmac = hmac::Signer::with(&mkey); + { + let cipher_iv = &result_msg[64..(64 + 16 + plain.len())]; + hmac.update(cipher_iv); + } + hmac.update(auth_data); + let sig = hmac.sign(); + result_msg[(64 + 16 + plain.len())..].copy_from_slice(&sig); + } + Ok(msg) +} + +/// Decrypt a message with a secret key, checking HMAC for ciphertext +/// and authenticated data validity. 
+pub fn decrypt(secret: &Secret, auth_data: &[u8], encrypted: &[u8]) -> Result, Error> { + const META_LEN: usize = 1 + 64 + 16 + 32; + let enc_version = encrypted[0]; + if encrypted.len() < META_LEN || enc_version < 2 || enc_version > 4 { + return Err(Error::InvalidMessage); + } + + let e = &encrypted[1..]; + let p = Public::from_slice(&e[0..64]); + let z = ecdh::agree(secret, &p)?; + let mut key = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); + + let ekey = &key[0..16]; + let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); + + let cipher_text_len = encrypted.len() - META_LEN; + let cipher_with_iv = &e[64..(64 + 16 + cipher_text_len)]; + let cipher_iv = &cipher_with_iv[0..16]; + let cipher_no_iv = &cipher_with_iv[16..]; + let msg_mac = &e[(64 + 16 + cipher_text_len)..]; + + // Verify tag + let mut hmac = hmac::Signer::with(&mkey); + hmac.update(cipher_with_iv); + hmac.update(auth_data); + let mac = hmac.sign(); + + if !is_equal(&mac.as_ref()[..], msg_mac) { + return Err(Error::InvalidMessage); + } + + let mut msg = vec![0u8; cipher_text_len]; + aes::decrypt_128_ctr(ekey, cipher_iv, cipher_no_iv, &mut msg[..])?; + Ok(msg) +} + +fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { + // SEC/ISO/Shoup specify counter size SHOULD be equivalent + // to size of hash output, however, it also notes that + // the 4 bytes is okay. NIST specifies 4 bytes. 
+ let mut ctr = 1u32; + let mut written = 0usize; + while written < dest.len() { + let mut hasher = digest::Hasher::sha256(); + let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8]; + hasher.update(&ctrs); + hasher.update(secret.as_bytes()); + hasher.update(s1); + let d = hasher.finish(); + &mut dest[written..(written + 32)].copy_from_slice(&d); + written += 32; + ctr += 1; + } +} + +#[cfg(test)] +mod tests { + use super::super::{ecies, Random, Generator}; + + #[test] + fn ecies_shared() { + let kp = Random.generate().unwrap(); + let message = b"So many books, so little time"; + + let shared = b"shared"; + let wrong_shared = b"incorrect"; + let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap(); + assert!(encrypted[..] != message[..]); + assert_eq!(encrypted[0], 0x04); + + assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err()); + let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); + assert_eq!(decrypted[..message.len()], message[..]); + } +} diff --git a/parity-crypto/src/publickey/error.rs b/parity-crypto/src/publickey/error.rs new file mode 100644 index 000000000..2cc66f733 --- /dev/null +++ b/parity-crypto/src/publickey/error.rs @@ -0,0 +1,99 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! 
Module specific errors + +use std::{fmt, result, error::Error as StdError}; +use crate::error::SymmError; + +/// Module specific errors +#[derive(Debug)] +pub enum Error { + /// secp256k1 enc error + Secp(secp256k1::Error), + /// Invalid secret key + InvalidSecretKey, + /// Invalid public key + InvalidPublicKey, + /// Invalid address + InvalidAddress, + /// Invalid EC signature + InvalidSignature, + /// Invalid AES message + InvalidMessage, + /// IO Error + Io(std::io::Error), + /// Symmetric encryption error + Symm(SymmError), + /// Custom + Custom(String), +} + +impl StdError for Error { + fn source(&self) -> Option<&(StdError + 'static)> { + match self { + Error::Secp(secp_err) => Some(secp_err), + Error::Io(err) => Some(err), + Error::Symm(symm_err) => Some(symm_err), + _ => None, + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { + match self { + Error::Secp(err) => write!(f, "secp error: {}", err), + Error::InvalidSecretKey => write!(f, "invalid secret key"), + Error::InvalidPublicKey => write!(f, "invalid public key"), + Error::InvalidAddress => write!(f, "invalid address"), + Error::InvalidSignature => write!(f, "invalid EC signature"), + Error::InvalidMessage => write!(f, "invalid AES message"), + Error::Io(err) => write!(f, "I/O error: {}", err), + Error::Symm(err) => write!(f, "symmetric encryption error: {}", err), + Error::Custom(err) => write!(f, "custom crypto error: {}", err), + } + } +} + +impl Into for Error { + fn into(self) -> String { + format!("{}", self) + } +} + +impl From for Error { + fn from(err: std::io::Error) -> Error { + Error::Io(err) + } +} + +impl From for Error { + fn from(err: SymmError) -> Error { + Error::Symm(err) + } +} + +impl From for Error { + fn from(e: secp256k1::Error) -> Error { + match e { + secp256k1::Error::InvalidMessage => Error::InvalidMessage, + secp256k1::Error::InvalidPublicKey => Error::InvalidPublicKey, + secp256k1::Error::InvalidSecretKey 
=> Error::InvalidSecretKey, + _ => Error::InvalidSignature, + } + } +} diff --git a/parity-crypto/src/publickey/extended_keys.rs b/parity-crypto/src/publickey/extended_keys.rs new file mode 100644 index 000000000..e585672e8 --- /dev/null +++ b/parity-crypto/src/publickey/extended_keys.rs @@ -0,0 +1,521 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Secret, public keys extended with the entropy (aka chain code), that allows further key derivation +//! Each extended key has 2^31 normal child keys, and 2^31 hardened child keys. +//! Each of these child keys has an index. The normal child keys use indices 0 through 2^31 - 1. +//! The hardened child keys use indices 2^31 through 2^32 - 1. +//! 
See more details about derivation in https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki + +use super::{Secret, Public}; +use ethereum_types::H256; +pub use self::derivation::Error as DerivationError; + +/// Represents label that can be stored as a part of key derivation +pub trait Label { + /// Length of the data that label occupies + fn len() -> usize; + + /// Store label data to the key derivation sequence + /// Must not use more than `len()` bytes from slice + fn store(&self, target: &mut [u8]); +} + +impl Label for u32 { + fn len() -> usize { 4 } + + fn store(&self, target: &mut [u8]) { + let bytes = self.to_be_bytes(); + target[0..4].copy_from_slice(&bytes); + } +} + +/// Key derivation over generic label `T` +pub enum Derivation { + /// Soft key derivation (allow proof of parent) + Soft(T), + /// Hard key derivation (does not allow proof of parent) + Hard(T), +} + +impl From for Derivation { + fn from(index: u32) -> Self { + // Type of the derived key is defined by it index + // See module's documentation for more details + if index < (2 << 30) { + Derivation::Soft(index) + } + else { + Derivation::Hard(index) + } + } +} + +impl Label for H256 { + fn len() -> usize { 32 } + + fn store(&self, target: &mut [u8]) { + (&mut target[0..32]).copy_from_slice(self.as_bytes()); + } +} + +/// Extended secret key, allows deterministic derivation of subsequent keys. +pub struct ExtendedSecret { + secret: Secret, + chain_code: H256, +} + +impl ExtendedSecret { + /// New extended key from given secret and chain code. + pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret { + ExtendedSecret { + secret: secret, + chain_code: chain_code, + } + } + + /// New extended key from given secret with the random chain code. + pub fn new_random(secret: Secret) -> ExtendedSecret { + ExtendedSecret::with_code(secret, H256::random()) + } + + /// New extended key from given secret. + /// Chain code will be derived from the secret itself (in a deterministic way). 
+ pub fn new(secret: Secret) -> ExtendedSecret { + let chain_code = derivation::chain_code(*secret); + ExtendedSecret::with_code(secret, chain_code) + } + + /// Derive new private key + pub fn derive(&self, index: Derivation) -> ExtendedSecret where T: Label { + let (derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index); + + let derived_secret = Secret::from(derived_key.0); + + ExtendedSecret::with_code(derived_secret, next_chain_code) + } + + /// Private key component of the extended key. + pub fn as_raw(&self) -> &Secret { + &self.secret + } +} + +/// Extended public key, allows deterministic derivation of subsequent keys. +pub struct ExtendedPublic { + public: Public, + chain_code: H256, +} + +impl ExtendedPublic { + /// New extended public key from known parent and chain code + pub fn new(public: Public, chain_code: H256) -> Self { + ExtendedPublic { public: public, chain_code: chain_code } + } + + /// Create new extended public key from known secret + pub fn from_secret(secret: &ExtendedSecret) -> Result { + Ok( + ExtendedPublic::new( + derivation::point(**secret.as_raw())?, + secret.chain_code.clone(), + ) + ) + } + + /// Derive new public key + /// Operation is defined only for index belongs [0..2^31) + pub fn derive(&self, index: Derivation) -> Result where T: Label { + let (derived_key, next_chain_code) = derivation::public(self.public, self.chain_code, index)?; + Ok(ExtendedPublic::new(derived_key, next_chain_code)) + } + + pub fn public(&self) -> &Public { + &self.public + } +} + +pub struct ExtendedKeyPair { + secret: ExtendedSecret, + public: ExtendedPublic, +} + +impl ExtendedKeyPair { + pub fn new(secret: Secret) -> Self { + let extended_secret = ExtendedSecret::new(secret); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Valid `Secret` always produces valid public; qed"); + ExtendedKeyPair { + secret: extended_secret, + public: extended_public, + } + } + + pub fn with_code(secret: 
Secret, public: Public, chain_code: H256) -> Self { + ExtendedKeyPair { + secret: ExtendedSecret::with_code(secret, chain_code.clone()), + public: ExtendedPublic::new(public, chain_code), + } + } + + pub fn with_secret(secret: Secret, chain_code: H256) -> Self { + let extended_secret = ExtendedSecret::with_code(secret, chain_code); + let extended_public = ExtendedPublic::from_secret(&extended_secret) + .expect("Valid `Secret` always produces valid public; qed"); + ExtendedKeyPair { + secret: extended_secret, + public: extended_public, + } + } + + pub fn with_seed(seed: &[u8]) -> Result { + let (master_key, chain_code) = derivation::seed_pair(seed); + Ok(ExtendedKeyPair::with_secret( + Secret::import_key(master_key.as_bytes()).map_err(|_| DerivationError::InvalidSeed)?, + chain_code, + )) + } + + pub fn secret(&self) -> &ExtendedSecret { + &self.secret + } + + pub fn public(&self) -> &ExtendedPublic { + &self.public + } + + pub fn derive(&self, index: Derivation) -> Result where T: Label { + let derived = self.secret.derive(index); + + Ok(ExtendedKeyPair { + public: ExtendedPublic::from_secret(&derived)?, + secret: derived, + }) + } +} + +// Derivation functions for private and public keys +// Work is based on BIP0032 +// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki +mod derivation { + use crate::{hmac, Keccak256}; + use super::super::SECP256K1; + use ethereum_types::{BigEndianHash, U256, U512, H512, H256}; + use secp256k1::key::{SecretKey, PublicKey}; + use super::super::ec_math_utils::CURVE_ORDER; + use super::{Label, Derivation}; + use std::convert::TryInto; + + #[derive(Debug)] + pub enum Error { + InvalidHardenedUse, + InvalidPoint, + MissingIndex, + InvalidSeed, + } + + // Deterministic derivation of the key using secp256k1 elliptic curve. + // Derivation can be either hardened or not. 
+ // For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum + // + // Can panic if passed `private_key` is not a valid secp256k1 private key + // (outside of (0..curve_order()]) field + pub fn private(private_key: H256, chain_code: H256, index: Derivation) -> (H256, H256) where T: Label { + match index { + Derivation::Soft(index) => private_soft(private_key, chain_code, index), + Derivation::Hard(index) => private_hard(private_key, chain_code, index), + } + } + + fn hmac_pair(data: &[u8], private_key: H256, chain_code: H256) -> (H256, H256) { + let private: U256 = private_key.into_uint(); + + // produces 512-bit derived hmac (I) + let skey = hmac::SigKey::sha512(chain_code.as_bytes()); + let i_512 = hmac::sign(&skey, &data[..]); + + // left most 256 bits are later added to original private key + let hmac_key: U256 = H256::from_slice(&i_512[0..32]).into_uint(); + // right most 256 bits are new chain code for later derivations + let next_chain_code = H256::from_slice(&i_512[32..64]); + + let child_key = BigEndianHash::from_uint(&private_add(hmac_key, private)); + (child_key, next_chain_code) + } + + // Can panic if passed `private_key` is not a valid secp256k1 private key + // (outside of (0..curve_order()]) field + fn private_soft(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label { + let mut data = vec![0u8; 33 + T::len()]; + + let sec_private = SecretKey::from_slice(&SECP256K1, private_key.as_bytes()) + .expect("Caller should provide valid private key"); + let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private) + .expect("Caller should provide valid private key"); + let public_serialized = sec_public.serialize_vec(&SECP256K1, true); + + // curve point (compressed public key) -- index + // 0..33 -- 33..end + data[0..33].copy_from_slice(&public_serialized); + index.store(&mut data[33..]); + + hmac_pair(&data, private_key, chain_code) + } + + // Deterministic derivation of the key using 
secp256k1 elliptic curve + // This is hardened derivation and does not allow to associate + // corresponding public keys of the original and derived private keys + fn private_hard(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label { + let mut data: Vec = vec![0u8; 33 + T::len()]; + let private: U256 = private_key.into_uint(); + + // 0x00 (padding) -- private_key -- index + // 0 -- 1..33 -- 33..end + private.to_big_endian(&mut data[1..33]); + index.store(&mut data[33..(33 + T::len())]); + + hmac_pair(&data, private_key, chain_code) + } + + fn private_add(k1: U256, k2: U256) -> U256 { + let sum = U512::from(k1) + U512::from(k2); + modulo(sum, *CURVE_ORDER) + } + + // todo: surely can be optimized + fn modulo(u1: U512, u2: U256) -> U256 { + let m = u1 % U512::from(u2); + m.try_into().expect("U512 modulo U256 should fit into U256; qed") + } + + pub fn public(public_key: H512, chain_code: H256, derivation: Derivation) -> Result<(H512, H256), Error> where T: Label { + let index = match derivation { + Derivation::Soft(index) => index, + Derivation::Hard(_) => { return Err(Error::InvalidHardenedUse); } + }; + + let mut public_sec_raw = [0u8; 65]; + public_sec_raw[0] = 4; + public_sec_raw[1..65].copy_from_slice(public_key.as_bytes()); + let public_sec = PublicKey::from_slice(&SECP256K1, &public_sec_raw).map_err(|_| Error::InvalidPoint)?; + let public_serialized = public_sec.serialize_vec(&SECP256K1, true); + + let mut data = vec![0u8; 33 + T::len()]; + // curve point (compressed public key) -- index + // 0..33 -- 33..end + data[0..33].copy_from_slice(&public_serialized); + index.store(&mut data[33..(33 + T::len())]); + + // HMAC512SHA produces [derived private(256); new chain code(256)] + let skey = hmac::SigKey::sha512(chain_code.as_bytes()); + let i_512 = hmac::sign(&skey, &data[..]); + + let new_private = H256::from_slice(&i_512[0..32]); + let new_chain_code = H256::from_slice(&i_512[32..64]); + + // Generated private key can (extremely rarely) 
be out of secp256k1 key field + if *CURVE_ORDER <= new_private.into_uint() { return Err(Error::MissingIndex); } + let new_private_sec = SecretKey::from_slice(&SECP256K1, new_private.as_bytes()) + .expect("Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed"); + let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec) + .expect("Valid private key produces valid public key"); + + // Adding two points on the elliptic curves (combining two public keys) + new_public.add_assign(&SECP256K1, &public_sec) + .expect("Addition of two valid points produce valid point"); + + let serialized = new_public.serialize_vec(&SECP256K1, false); + + Ok(( + H512::from_slice(&serialized[1..65]), + new_chain_code, + )) + } + + fn sha3(slc: &[u8]) -> H256 { + slc.keccak256().into() + } + + pub fn chain_code(secret: H256) -> H256 { + // 100,000 rounds of sha3 (one initial hash plus 99,999 loop iterations) + let mut running_sha3 = sha3(secret.as_bytes()); + for _ in 0..99999 { running_sha3 = sha3(running_sha3.as_bytes()); } + running_sha3 + } + + pub fn point(secret: H256) -> Result { + let sec = SecretKey::from_slice(&SECP256K1, secret.as_bytes()) + .map_err(|_| Error::InvalidPoint)?; + let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec) + .map_err(|_| Error::InvalidPoint)?; + let serialized = public_sec.serialize_vec(&SECP256K1, false); + Ok(H512::from_slice(&serialized[1..65])) + } + + pub fn seed_pair(seed: &[u8]) -> (H256, H256) { + let skey = hmac::SigKey::sha512(b"Bitcoin seed"); + let i_512 = hmac::sign(&skey, seed); + + let master_key = H256::from_slice(&i_512[0..32]); + let chain_code = H256::from_slice(&i_512[32..64]); + + (master_key, chain_code) + } +} + +#[cfg(test)] +mod tests { + use super::{ExtendedSecret, ExtendedPublic, ExtendedKeyPair}; + use super::super::Secret; + use std::str::FromStr; + use ethereum_types::{H128, H256, H512}; + use super::{derivation, Derivation}; + + fn master_chain_basic() -> (H256, H256) { + let seed = 
H128::from_str("000102030405060708090a0b0c0d0e0f") + .expect("Seed should be valid H128") + .as_bytes() + .to_vec(); + + derivation::seed_pair(&*seed) + } + + fn test_extended(f: F, test_private: H256) where F: Fn(ExtendedSecret) -> ExtendedSecret { + let (private_seed, chain_code) = master_chain_basic(); + let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code); + let derived = f(extended_secret); + assert_eq!(**derived.as_raw(), test_private); + } + + #[test] + fn smoky() { + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::zero()); + + // hardened + assert_eq!(&**extended_secret.as_raw(), &*secret); + assert_eq!( + **extended_secret.derive(2147483648.into()).as_raw(), + H256::from_str("0927453daed47839608e414a3738dfad10aed17c459bbd9ab53f89b026c834b6").unwrap(), + ); + assert_eq!( + **extended_secret.derive(2147483649.into()).as_raw(), + H256::from_str("44238b6a29c6dcbe9b401364141ba11e2198c289a5fed243a1c11af35c19dc0f").unwrap(), + ); + + // normal + assert_eq!(**extended_secret.derive(0.into()).as_raw(), H256::from_str("bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6").unwrap()); + assert_eq!(**extended_secret.derive(1.into()).as_raw(), H256::from_str("bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc").unwrap()); + assert_eq!(**extended_secret.derive(2.into()).as_raw(), H256::from_str("86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268").unwrap()); + + let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); + let derived_public = extended_public.derive(0.into()).expect("First derivation of public should succeed"); + assert_eq!( + *derived_public.public(), + 
H512::from_str("f7b3244c96688f92372bfd4def26dc4151529747bab9f188a4ad34e141d47bd66522ff048bc6f19a0a4429b04318b1a8796c000265b4fa200dae5f6dda92dd94").unwrap(), + ); + + let keypair = ExtendedKeyPair::with_secret( + Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(), + H256::from_low_u64_be(64), + ); + assert_eq!( + **keypair.derive(2147483648u32.into()).expect("Derivation of keypair should succeed").secret().as_raw(), + H256::from_str("edef54414c03196557cf73774bc97a645c9a1df2164ed34f0c2a78d1375a930c").unwrap(), + ); + } + + #[test] + fn h256_soft_match() { + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); + + let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::zero()); + let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); + + let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret)); + let derived_public0 = extended_public.derive(Derivation::Soft(derivation_secret)).expect("First derivation of public should succeed"); + + let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); + + assert_eq!(public_from_secret0.public(), derived_public0.public()); + } + + #[test] + fn h256_hard() { + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); + let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); + + assert_eq!( + **extended_secret.derive(Derivation::Hard(derivation_secret)).as_raw(), + H256::from_str("2bc2d696fb744d77ff813b4a1ef0ad64e1e5188b622c54ba917acc5ebc7c5486").unwrap(), + ); + } + 
+ #[test] + fn match_() { + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); + let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); + + let derived_secret0 = extended_secret.derive(0.into()); + let derived_public0 = extended_public.derive(0.into()).expect("First derivation of public should succeed"); + + let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); + + assert_eq!(public_from_secret0.public(), derived_public0.public()); + } + + #[test] + fn test_seeds() { + let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") + .expect("Seed should be valid H128") + .as_bytes() + .to_vec(); + + // private key from bitcoin test vector + // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs + let test_private = H256::from_str("e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35") + .expect("Private should be decoded ok"); + + let (private_seed, _) = derivation::seed_pair(&*seed); + + assert_eq!(private_seed, test_private); + } + + #[test] + fn test_vector_1() { + // xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7 + // H(0) + test_extended( + |secret| secret.derive(2147483648.into()), + H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea") + .expect("Private should be decoded ok") + ); + } + + #[test] + fn test_vector_2() { + // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs + // H(0)/1 + test_extended( + |secret| secret.derive(2147483648.into()).derive(1.into()), + H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368") + .expect("Private 
should be decoded ok") + ); + } +} diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs new file mode 100644 index 000000000..04a9dbadd --- /dev/null +++ b/parity-crypto/src/publickey/keypair.rs @@ -0,0 +1,122 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Key pair (public + secrect) description + +use std::fmt; +use secp256k1::key; +use super::{Secret, Public, Address, SECP256K1, Error}; +use crate::Keccak256; + +/// Convert public key into the address +pub fn public_to_address(public: &Public) -> Address { + let hash = public.keccak256(); + let mut result = Address::zero(); + result.as_bytes_mut().copy_from_slice(&hash[12..]); + result +} + +#[derive(Debug, Clone, PartialEq)] +/// secp256k1 key pair +pub struct KeyPair { + secret: Secret, + public: Public, +} + +impl fmt::Display for KeyPair { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + writeln!(f, "secret: {:x}", self.secret)?; + writeln!(f, "public: {:x}", self.public)?; + write!(f, "address: {:x}", self.address()) + } +} + +impl KeyPair { + /// Create a pair from secret key + pub fn from_secret(secret: Secret) -> Result { + let context = &SECP256K1; + let s: key::SecretKey = key::SecretKey::from_slice(context, &secret[..])?; + let pub_key = 
key::PublicKey::from_secret_key(context, &s)?; + let serialized = pub_key.serialize_vec(context, false); + + let mut public = Public::default(); + public.as_bytes_mut().copy_from_slice(&serialized[1..65]); + + let keypair = KeyPair { + secret: secret, + public: public, + }; + + Ok(keypair) + } + + /// Create a pair from the slice, which imported and verified as secret key + pub fn from_secret_slice(slice: &[u8]) -> Result { + Self::from_secret(Secret::import_key(slice)?) + } + + /// Copies a pair from another one + pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self { + let context = &SECP256K1; + let serialized = publ.serialize_vec(context, false); + let secret = Secret::from(sec); + let mut public = Public::default(); + public.as_bytes_mut().copy_from_slice(&serialized[1..65]); + + KeyPair { + secret, + public, + } + } + + /// Returns secret part of the keypair + pub fn secret(&self) -> &Secret { + &self.secret + } + + /// Returns public part of the keypair + pub fn public(&self) -> &Public { + &self.public + } + + /// Returns public part of the keypair converted into Address + pub fn address(&self) -> Address { + public_to_address(&self.public) + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + use super::{KeyPair, Secret}; + + #[test] + fn from_secret() { + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let _ = KeyPair::from_secret(secret).unwrap(); + } + + #[test] + fn keypair_display() { + let expected = +"secret: a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65 +public: 8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4 +address: 5b073e9233944b5e729e46d618f0d8edf3d9c34a".to_owned(); + let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(format!("{}", kp), 
expected); + } +} diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs new file mode 100644 index 000000000..d8d6bc77e --- /dev/null +++ b/parity-crypto/src/publickey/keypair_generator.rs @@ -0,0 +1,47 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Random key pair generator. Relies on the secp256k1 C-library to generate random data. + +use rand::rngs::OsRng; +use std::convert::Infallible; +use super::{Generator, KeyPair, SECP256K1}; + +/// Randomly generates new keypair, instantiating the RNG each time. 
+pub struct Random; + +impl Generator for Random { + type Error = std::io::Error; + + fn generate(&mut self) -> Result { + let mut rng = OsRng::new()?; + match rng.generate() { + Ok(pair) => Ok(pair), + Err(void) => match void {}, // LLVM unreachable + } + } +} + +impl Generator for OsRng { + type Error = Infallible; + + fn generate(&mut self) -> Result { + let (sec, publ) = SECP256K1.generate_keypair(self) + .expect("context always created with full capabilities; qed"); + + Ok(KeyPair::from_keypair(sec, publ)) + } +} diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs new file mode 100644 index 000000000..c983f0112 --- /dev/null +++ b/parity-crypto/src/publickey/mod.rs @@ -0,0 +1,55 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Submodule of crypto utils for working with public key crypto primitives +//! If you are looking for git history please refer to the `ethkey` crate in the `parity-ethereum` repository. 
+ +mod keypair; +mod keypair_generator; +mod ecdsa_signature; +mod secret_key; +mod extended_keys; + +pub mod ecdh; +pub mod ecies; +pub mod ec_math_utils; +pub mod error; + +pub use self::keypair::{KeyPair, public_to_address}; +pub use self::ec_math_utils::public_is_valid; +pub use self::keypair_generator::Random; +pub use self::error::Error; +pub use self::ecdsa_signature::{sign, verify_public, verify_address, recover, Signature}; +pub use self::secret_key::Secret; +pub use self::extended_keys::{ExtendedPublic, ExtendedSecret, ExtendedKeyPair, DerivationError, Derivation}; + +use ethereum_types::H256; +use lazy_static::lazy_static; + +pub use ethereum_types::{Address, Public}; +pub type Message = H256; + +lazy_static! { + pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); +} + +/// Generates new keypair. +pub trait Generator { + type Error; + + /// Should be called to generate new keypair. + fn generate(&mut self) -> Result; +} diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs new file mode 100644 index 000000000..ef8590c1d --- /dev/null +++ b/parity-crypto/src/publickey/secret_key.rs @@ -0,0 +1,307 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! 
Secret key implementation + +use std::fmt; +use std::ops::Deref; +use std::str::FromStr; +use std::convert::TryFrom; +use secp256k1::constants::{SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE}; +use secp256k1::key; +use ethereum_types::H256; +use zeroize::Zeroize; +use super::{SECP256K1, Error}; + +/// Represents secret key +#[derive(Clone, PartialEq, Eq)] +pub struct Secret { + inner: H256, +} + +impl Drop for Secret { + fn drop(&mut self) { + self.inner.0.zeroize() + } +} + +impl fmt::LowerHex for Secret { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(fmt) + } +} + +impl fmt::Debug for Secret { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(fmt) + } +} + +impl fmt::Display for Secret { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "Secret: 0x{:x}{:x}..{:x}{:x}", self.inner[0], self.inner[1], self.inner[30], self.inner[31]) + } +} + +impl Secret { + /// Creates a `Secret` from the given slice, returning `None` if the slice length != 32. + pub fn copy_from_slice(key: &[u8]) -> Option { + if key.len() != 32 { + return None + } + let mut h = H256::zero(); + h.as_bytes_mut().copy_from_slice(&key[0..32]); + Some(Secret { inner: h }) + } + + /// Creates zero key, which is invalid for crypto operations, but valid for math operation. + pub fn zero() -> Self { + Secret { inner: H256::zero() } + } + + /// Imports and validates the key. + pub fn import_key(key: &[u8]) -> Result { + let secret = key::SecretKey::from_slice(&super::SECP256K1, key)?; + Ok(secret.into()) + } + + /// Checks validity of this key. 
+ pub fn check_validity(&self) -> Result<(), Error> { + self.to_secp256k1_secret().map(|_| ()) + } + + /// Wrapper over hex conversion + pub fn to_hex(&self) -> String { + format!("{:x}", self.inner) + } + + /// Inplace add one secret key to another (scalar + scalar) + pub fn add(&mut self, other: &Secret) -> Result<(), Error> { + match (self.is_zero(), other.is_zero()) { + (true, true) | (false, true) => Ok(()), + (true, false) => { + *self = other.clone(); + Ok(()) + }, + (false, false) => { + let mut key_secret = self.to_secp256k1_secret()?; + let other_secret = other.to_secp256k1_secret()?; + key_secret.add_assign(&SECP256K1, &other_secret)?; + + *self = key_secret.into(); + Ok(()) + }, + } + } + + /// Inplace subtract one secret key from another (scalar - scalar) + pub fn sub(&mut self, other: &Secret) -> Result<(), Error> { + match (self.is_zero(), other.is_zero()) { + (true, true) | (false, true) => Ok(()), + (true, false) => { + *self = other.clone(); + self.neg() + }, + (false, false) => { + let mut key_secret = self.to_secp256k1_secret()?; + let mut other_secret = other.to_secp256k1_secret()?; + other_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + key_secret.add_assign(&SECP256K1, &other_secret)?; + + *self = key_secret.into(); + Ok(()) + }, + } + } + + /// Inplace decrease secret key (scalar - 1) + pub fn dec(&mut self) -> Result<(), Error> { + match self.is_zero() { + true => { + *self = key::MINUS_ONE_KEY.into(); + Ok(()) + }, + false => { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + + *self = key_secret.into(); + Ok(()) + }, + } + } + + /// Inplace multiply one secret key to another (scalar * scalar) + pub fn mul(&mut self, other: &Secret) -> Result<(), Error> { + match (self.is_zero(), other.is_zero()) { + (true, true) | (true, false) => Ok(()), + (false, true) => { + *self = Self::zero(); + Ok(()) + }, + (false, false) => { + let mut key_secret = self.to_secp256k1_secret()?; 
+ let other_secret = other.to_secp256k1_secret()?; + key_secret.mul_assign(&SECP256K1, &other_secret)?; + + *self = key_secret.into(); + Ok(()) + }, + } + } + + /// Inplace negate secret key (-scalar) + pub fn neg(&mut self) -> Result<(), Error> { + match self.is_zero() { + true => Ok(()), + false => { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + + *self = key_secret.into(); + Ok(()) + }, + } + } + + /// Inplace inverse secret key (1 / scalar) + pub fn inv(&mut self) -> Result<(), Error> { + let mut key_secret = self.to_secp256k1_secret()?; + key_secret.inv_assign(&SECP256K1)?; + + *self = key_secret.into(); + Ok(()) + } + + /// Compute power of secret key inplace (secret ^ pow). + pub fn pow(&mut self, pow: usize) -> Result<(), Error> { + if self.is_zero() { + return Ok(()); + } + + match pow { + 0 => *self = key::ONE_KEY.into(), + 1 => (), + _ => { + let c = self.clone(); + for _ in 1..pow { + self.mul(&c)?; + } + }, + } + + Ok(()) + } + + /// Create `secp256k1::key::SecretKey` based on this secret + pub fn to_secp256k1_secret(&self) -> Result { + Ok(key::SecretKey::from_slice(&SECP256K1, &self[..])?) + } +} + +impl FromStr for Secret { + type Err = Error; + fn from_str(s: &str) -> Result { + Ok(H256::from_str(s).map_err(|e| Error::Custom(format!("{:?}", e)))?.into()) + } +} + +impl From<[u8; 32]> for Secret { + fn from(k: [u8; 32]) -> Self { + Secret { inner: H256(k) } + } +} + +impl From for Secret { + fn from(s: H256) -> Self { + s.0.into() + } +} + +impl TryFrom<&str> for Secret { + type Error = Error; + + fn try_from(s: &str) -> Result { + s.parse().map_err(|e| Error::Custom(format!("{:?}", e))) + } +} + +impl From for Secret { + fn from(key: key::SecretKey) -> Self { + let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; + a.copy_from_slice(&key[0 .. 
SECP256K1_SECRET_KEY_SIZE]); + a.into() + } +} + +impl Deref for Secret { + type Target = H256; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + use super::super::{Random, Generator}; + use super::Secret; + + #[test] + fn multiplicating_secret_inversion_with_secret_gives_one() { + let secret = Random.generate().unwrap().secret().clone(); + let mut inversion = secret.clone(); + inversion.inv().unwrap(); + inversion.mul(&secret).unwrap(); + assert_eq!(inversion, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap()); + } + + #[test] + fn secret_inversion_is_reversible_with_inversion() { + let secret = Random.generate().unwrap().secret().clone(); + let mut inversion = secret.clone(); + inversion.inv().unwrap(); + inversion.inv().unwrap(); + assert_eq!(inversion, secret); + } + + #[test] + fn secret_pow() { + let secret = Random.generate().unwrap().secret().clone(); + + let mut pow0 = secret.clone(); + pow0.pow(0).unwrap(); + assert_eq!(pow0, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap()); + + let mut pow1 = secret.clone(); + pow1.pow(1).unwrap(); + assert_eq!(pow1, secret); + + let mut pow2 = secret.clone(); + pow2.pow(2).unwrap(); + let mut pow2_expected = secret.clone(); + pow2_expected.mul(&secret).unwrap(); + assert_eq!(pow2, pow2_expected); + + let mut pow3 = secret.clone(); + pow3.pow(3).unwrap(); + let mut pow3_expected = secret.clone(); + pow3_expected.mul(&secret).unwrap(); + pow3_expected.mul(&secret).unwrap(); + assert_eq!(pow3, pow3_expected); + } +} From 1ddcaf0a063168ee93d9cefc280628fd2f6c0a42 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 22 Oct 2019 14:35:56 +0200 Subject: [PATCH 019/359] [impl-serde]: add benches `Bytes` and `hex_to_uint` (#245) * [impl-serde]: add benches `Bytes` and `hex_to_uint` * benc(impl serde): correct 65_536 input --- primitive-types/impls/serde/Cargo.toml | 3 +- 
.../impls/serde/benches/impl_serde.rs | 91 +++++++++++++++++-- primitive-types/impls/serde/benches/input.rs | 18 ++++ primitive-types/impls/serde/src/lib.rs | 2 +- primitive-types/impls/serde/src/serialize.rs | 2 +- 5 files changed, 105 insertions(+), 11 deletions(-) create mode 100644 primitive-types/impls/serde/benches/input.rs diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index bd2963fc4..8f2048c9f 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -2,6 +2,7 @@ name = "impl-serde" version = "0.2.2" authors = ["Parity Technologies "] +edition = "2018" license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." @@ -12,7 +13,7 @@ serde = "1.0" [dev-dependencies] criterion = "0.3.0" serde_derive = "1.0" -serde_json = "1.0.40" +serde_json = "1.0" uint = "0.8.1" [[bench]] diff --git a/primitive-types/impls/serde/benches/impl_serde.rs b/primitive-types/impls/serde/benches/impl_serde.rs index 6aeb1eddf..5e8935710 100644 --- a/primitive-types/impls/serde/benches/impl_serde.rs +++ b/primitive-types/impls/serde/benches/impl_serde.rs @@ -12,13 +12,13 @@ //! cargo bench //! ``` -#[macro_use] -extern crate criterion; -#[macro_use] -extern crate uint; -#[macro_use] -extern crate impl_serde; -extern crate serde_json; +use criterion::{black_box, criterion_main, criterion_group, Criterion, ParameterizedBenchmark}; +use serde_derive::{Deserialize, Serialize}; +// TODO(niklasad1): use `uint::construct_uint` when a new version of `uint` is released +use uint::*; +use impl_serde::impl_uint_serde; + +mod input; construct_uint! { pub struct U256(4); @@ -26,11 +26,15 @@ construct_uint! 
{ impl_uint_serde!(U256, 4); -use criterion::{black_box, Criterion, ParameterizedBenchmark}; +#[derive(Debug, Deserialize, Serialize)] +struct Bytes(#[serde(with = "impl_serde::serialize")] Vec); criterion_group!( impl_serde, u256_to_hex, + hex_to_u256, + bytes_to_hex, + hex_to_bytes, ); criterion_main!(impl_serde); @@ -55,3 +59,74 @@ fn u256_to_hex(c: &mut Criterion) { ), ); } + +fn hex_to_u256(c: &mut Criterion) { + let parameters = vec![ + r#""0x0""#, + r#""0x1""#, + r#""0x10""#, + r#""0x100""#, + r#""0x1000000000000000000000000000000000000000000000000000000000000100""#, + ]; + + c.bench( + "hex_to_u256", + ParameterizedBenchmark::new( + "", + |b, x| { + b.iter(|| { + black_box(serde_json::from_str::(&x)) + }) + }, + parameters + ) + ); +} + +fn bytes_to_hex(c: &mut Criterion) { + let parameters = vec![ + serde_json::from_str::(&input::HEX_64_CHARS).unwrap(), + serde_json::from_str::(&input::HEX_256_CHARS).unwrap(), + serde_json::from_str::(&input::HEX_1024_CHARS).unwrap(), + serde_json::from_str::(&input::HEX_4096_CHARS).unwrap(), + serde_json::from_str::(&input::HEX_16384_CHARS).unwrap(), + serde_json::from_str::(&input::HEX_65536_CHARS).unwrap(), + ]; + + c.bench( + "bytes to hex", + ParameterizedBenchmark::new( + "", + |b, x| { + b.iter(|| { + black_box(serde_json::to_string(&x)) + }) + }, + parameters + ) + ); +} + +fn hex_to_bytes(c: &mut Criterion) { + let parameters = vec![ + input::HEX_64_CHARS, + input::HEX_256_CHARS, + input::HEX_1024_CHARS, + input::HEX_4096_CHARS, + input::HEX_16384_CHARS, + input::HEX_65536_CHARS, + ]; + + c.bench( + "hex to bytes", + ParameterizedBenchmark::new( + "", + |b, x| { + b.iter(|| { + black_box(serde_json::from_str::(&x)) + }) + }, + parameters + ) + ); +} diff --git a/primitive-types/impls/serde/benches/input.rs b/primitive-types/impls/serde/benches/input.rs new file mode 100644 index 000000000..060b430d8 --- /dev/null +++ b/primitive-types/impls/serde/benches/input.rs @@ -0,0 +1,18 @@ +/// Hexdecimal string 64 chars 
(32 bytes) +pub const HEX_64_CHARS: &str = "\"0x6402541b4e3c2ab65306aec48fce5adedc60e3ac465c3d7036c731e0b2e49209\""; + +/// Hexdecimal string 256 chars (128 bytes) +pub const HEX_256_CHARS: &str = "\"0x2568f30caf43f5f11ec121695200e166f89b17149743da42fdff7dba5504c527b34c898ef20a61a45109b7a6e6a516994567932b31478cfa3a3bd4b058e36e88a51400403be492afb01039910c45376951d2bbec9a838666404b14850c6a8efe07b30b7cedc3e84f59b678b5d812feb4adc7e2c39d681366563ef45669a33a7604415e46a3df671968e38df6115c80eedb96dd326a273404a4b9b9957055d22e7091d3e663faa6c54a48888f29778db6f7c5199a2ccd2237c265d30ea67aed475cf459ca8039831971c2e04eb6b89951d4ba472ef8b196e60e2875e3ec2955a07956936d56162d6758eb0fe09d2b2b12e27217f26aa38f90f5def14ab7c215d3\""; + +/// Hexadecimal string 1024 chars (512 bytes) +pub const HEX_1024_CHARS: &str = "\"0x102450f41fa547cdd5cba0f648912b2890fd9081889150edf41c97e3d23ab44334bba15ec46b1052ab86885328c3935bdd2229b0e403fee8ca4a70d6f47363d7e26c9253a97d625b28aa40a67133c6e3c9e6c0ea70bda6065755262a4f40b10d01a234adf14e997278afea3c869ec300066ddd474212d3fe5bfe82696aa13fd69ebc7a59877f23eee7245e96ad48feb897fd2148ab536a4e1220218499ba7b87dc9c6c48ee5186acc6699dd335ede2dd90b5d5c8292aa8b04e11656a053f0bb3d781283b6b1a2c863cb0ac60e24d0e194dee2dfe8cffcf96ae5e06d96026d36b1f3540e17db1d18e1e20d4f00d6ed5525bb826cd9a9c54b6c906b8cadddee844887287188a96c7eac7b86348814cc16c1348872364a5910f4739f914133d30fc6bfc99b398c526926c214f4c44792061f3f14e7daf2a0133f1eb070b9e5b8153c4d679bec659a76c81fd15326688826ea49c4b587d81b4fc483783683d0ca36331d8aaa26e8f162ce787cc191514309f6c3022d978f8d61a4db338baab043d697e2bc1c91aad78cb8c6a285eb1a2e000546b43b485985dc142d68f4ff7ddf45022ef2d059aa3871284de11ebce33ac61bc7419611b0d3c2836def6b980e13963606b06f34b2c4fa91b3a867ef9e0abd98217e56869fe1f9ebb720ebf34752d9ce049e800162c9c48eef2530653f9e049db9cdfb691124ea8dd6d296785b44bf8\""; + +/// Hexadecimal string 4096 chars (2048 bytes) +pub const HEX_4096_CHARS: &str = 
"\"0x40966ce6355f1dbbfef6ea6f1e131dbc6425bccdf60440fa53df8cbc55554e38a5e0f6899004b4ee7e5582349285b284849cc2a5f9a10b977eabff784d6be9fbb3361c3a0f188656a7dfa020e07e653fbd0ea76816422d76f6160d38f477ee5b16ed3a4cb7b6e28bc62c15f944ddce23439493659501557a4dbb8404bb0db9080c879c8d109127174e9b667a6f84e3f00ac7915a7ab971ad2aacb69074d5b804520b10842f253de4843ca0d594b9bc4e1888c4a291ab79b0e08d0100996d988f14e15bd63c97081e36ac47f546ab1c51213c8ca1de5a9ec261f7a4392c4e0e167a82142e1bf474e752889ee17a910ce0936d05e99cc8dfed0f6ebc63d723f29e5103fc74fa5168023459f9b4d73c98b2c4c054f2b5eafbd8460b922b7dcac8d30fcd15b585606191a5c69d8c0a4e47f693d1f3e5f278962715c5106bbc6b6b05313f83950997d002f01a81b6e3bc999049b846e1a19cbf7c4207a45548356c007db4030b13e4f4d81b24496ded0f54aeacc303b2f470474934057bfa0e27b95fa99bc946d38afa6b1ef6a3675981b2d81c3e58e12092d0cd9f1dad844cb6145f5e55d0492b21bf32b282831536232ee0906abd2dd0ef33d3206a3f70f97af1fbddf1868e09d82b1c9825cfedcebb05f5e5c37688f735732284b8608c9db57cc3a89914044d1dfdab36de3e876673611468650423127f28ec92a78d8e9de7065357be7bd5e052d009a445682f99a4523a77997af93c3080d56737d2c258637e6d245c28515e3a15172afa89607ef1cecbaf9241ac7b29f81010d20333dde8c8b2e0df7421336ad623a154fc2ed58c3505c8db0d1ed00a416a75136a98c901aee8ec97f6d7b5949df429e411f98f165455f000234441825a5456f60582a3f0dab0f831ac3f68934cf5b35b3255761c53f6fe9b1044d164a31a344b966d1f62403c6fb615b8c68fd5af2afa771ab3fe9aed6ac8a6818bd8bcf4d11512bf36f6f01d01c78d137496f1f42265430678cb329a4adbf21ca8d6a646f7543c2ab826f4251e196f21f4c6cafc1b910a8a83053b00b092a2effdb45935218939248a54a0d69f3d269b6e47039a5e56edaf0985b37687cb6301c57be754ed2e08dfb6da9275cb4dfcbc9f164f90762c41e1d618db827021db646041ea9de903bb9524c25d89c519e631ec1048694fa2faa77fad75af23c8ea7e5445c064aa157ec2c286b4ee50463b6a5b57b34fa52720293c18f059787a5a386599a58d7e5fee4aa301a6997842bcbfbf7e8241509c565b1ddfc20a08edbe6f80417b879a10b0ee75f34c4469187166c39834a98d40b40d090a4978dc8acd94eb3c91a8b9391850dd1520935242d358a5320e472dc788c779026258f014daa224ad2c0f49ba2f72a95ad4712305c9d9383
106b72ee31ddc62d9f41a2dea577fa1aabbd003b620b6959df8b939cf22357cb3d4785b2821d0d4d756228b32cbec7d2fa7d09c3f8910d97a90f4e919c3dc1fe8be1157a574dad0cba47edf41c13fb96cbd0dbb79dade0d25b1dd08e2bc2467bc60abeb215bbdf4f590311d6549168ee5f1a66ea32ec81ecf76a5d16eb7fcd66d706fba91f2bc1745e7609b291ff4960de1f7589baa9cc841e58370a94a898c59fed78c19d7e4277e2039a0206c36cf57e467b4ed699cd04de8ee1ae5f537b6f9dde0ea230f9f23aa7969ea9dea7d5d1252ec048084814d1f3c34d058b559cb4dca6f5cbd4da065b161a7f006378409a14b8c9f7a4b8b52bd6cd78dd8c9d429a3fea8989e7ea1e3f09a542b5d5fff5b215ca092162203a05bdbf86fd600f5f93ae0aa60ba9515f38c82b8be4dabdc768131f4b1543c9575e8ade19687446298f1948702ae45bb460fdbf6c007d080396a990fe77054418495e7b8dfa184bab4e7fbd370f5235bf8942411300e9c885a3357d8db0b6f7f4d7b4092918d6e17577946e46879a31d10d5503fbc391d6c96aa4f2f8a5f676afd8f11dee8190fef1a67fc144821175eb5231e4a34051af080d48af8950e17e3de652359158d79b4be46f30bf8959cca21dd093bc1aee5bb95a6bd3430ffb5cf13d7ffe0eaa0e1dd644604f01612f4947ff04e6a2361a7c28ef267ba521d4d4dcaea282dd477191225811d5572cbd1d2379455ffa7f47009b4ce867f8592fcc3cdc8ae71c391e5c5c66f5c0af7a8cbfb55428129db96ab0307ca9efd4ea3fac8f0bb37da6c53def450ef646f930632f3d2e8c05d787171ae41310529a94a3df34051a0b5bbfd373bab4a3ad222a283a877ef116cc4b86427f06fb9ffd31839274280b6a7beacbde9d29db7ea9b95eefb633dd3af49c6eb48c86b76e75ba688112eb313396d5523418c6503eb3613e8ad3904a493c6f40d5fcefb700b123a494ca5d5642ba178aa834995b46326a1bbc6648eadfe0c486b900e173aad2391e6d94ebda7c3d21c14042115c050eb5bcf156e181d086c0acfed4881b781a2baa6dd9073d4554fb3e123d2fa260de74df8917d86f00ddc4991efabc78ce583b5f5f569069409392dc457bb9cc998a0e8e057cb11aadcd14fd9ab65aff62af1641b6ba50a93ca1f6168f20b0f3b77f0674d5ee5b9b9d1e265a424f20721b627b053a38b4a435be65bffb7f2d01200aa41bae45df0039c3f767b165cbd78b16f537e35ec0d49c2e1714d04bf92421a5a29ba996a0f5ee76b9f22f065a07077ed549aa6be7267a0abd683e95092e706d182a3557393eaa745fb216504c23c6e1804355c29caedef6648726e55c192c227b70b7dbf067aed51ce685301205d9701c3fc9c8478ae9391934ead83e91f897983f7dd0a7
3cc15e4b0c6a6a321b6380e157b61a888d3cd35eda3829a563569082f24ed1269d323cbe9dd907c494b85e3c4d5b708097f4f\""; + +/// Hexadecimal string 16384 chars (8192 bytes) +pub const HEX_16384_CHARS: &str = "\"0x163845494c2b0c18961062bab4aaca8139fc0d25421880bb44321d6632cf7f4c4504f62524f7a86ae4475cf99b5b4cda8737a27f14ddae73a8c7815a60603e6f732dad7b42a94ef4cc9674b7c73c93b6baace28b61879755ad8261917798e9b158219efacd1c25689e1d1c085cf314808e5c3c1e92ea386a1e0c8a5fb1ae88e2156052ecde6aad91a78cba4cda9632fa7c41d64845d48a51a1ec922567e530184cd9fab6d8c1b19612b350ebacd2daf44dc32a969496f0e0cfea47362fbc387bc8cfa7f8a9efd797bed8d1a317ca217226aaecb0f58cc29cc4c2f4d3c91f6367b2db2c4ffc44243486bd6e5ceae007bfd6d1bc44f784af00df58f88b1a999911c256038e04a56360655b2ba92841fac013ef8cf902720c23991292b6e540f95a7ac9b7d0dfd92dd7ab7e757a80cb170acf0466812f141bad1b8047b10ed98f51fe1a28851b8019a73721c0b29815b6642587a77d2711fb93238a6005ca3613a049546db4b6626ffcca2c8352b02a4bcd514e76bd2c9e4f0a6ac557deca69ccfa41caf16d276e06b272d6bf2c680828dead23c3b6d3111201f2ca98afd36ae03da4b8a620e48cb8ae05ecf8d37d21840052ff426543a141b7f25e27a7cf94240945b0fa69e9e4e566e2255f677789cb26fc129b7021221af3588711cf9402a750d89e4f9288eacadc2bb64a6782b9c3cefd3faa57c3796ab150a9126d9959853bf93039dbcf8a0965dec9e295d5fa7e7f8d63f115e14d9e9eb531eacbcdd392cfa8c6eb499f55a6660b1bd9c2d40ebcf31581a720b0b2576ef202f739ac809a7ba7c7fe87817b6e76f02f8613111621d615aa57c9fb914cc8587a6e7f1e91a82381174e8ab011d50e86b826cd945c41cfe3608b5785c6c496b2f6e4ec9f25059bda78a3af9899426076b07039e5072b464b94ec5b5544ab15151ee583a3645819caf2df574d81079cd7ada1baf296db81e5c046f4385aad4554903cd107c0f78759c5dfd327a022a3a986de84a96e03858c3cbd476e4fe25645c437dd4986c31cb7fe6fe82d80f83887de253a6601160ccf4ef82d5e556c5fccb983bd8da0556983520b866768505e73a5eb9ffefea262dfa6218ddb143eff7f9a9b2715c526d3bcc4f1d1dfdfcea8ab68a005d0192ccdf68d2da67cbb156ada92df641a5d9f18af4b76485aa84565e9deb41c6b29ddef705c53801dcf6540dd2c2ceaa7da6bade03ce40e1e44fafe7edab9fe2a89ef77c25a4efcfc456f472a6502020a196beb2c17c25977e
f7c328a19c3636ef52ee8afa78f5d31002bda28f33156a3520b4e40a6740fca6bcc003b2fddfc33e7bff9c443350d4e2e571a0f7b465142c905c1d98c5ecdf887b494f1956a1f83d5952b4618748852937b900edaae2e9b8445eec1346a7b95b5ca36bafb9c45bdb7a895f1bf4852ec217886b7d67bfdf2711f73b56397c3b5f7138fbc91434d3ec3a1d8ca584d32691b82fc0e59f8ccc0ea9aef5ab92eb2f93fec2122142ba912832bb22a1fa594fe4b14de7f3a429bb6325fa6db1d1744f878fa2ccc630c02bf4a4a6c8a8fa42e22771dc2cd252f14c2cc6f0fc3a6fdd8c7ffd62adc26efee754b0fdfff4678eaf6dc2d8e59778b9cf1b691bedc4395b0680353be17acdc18d33101a8a2e6bcfe8878151b4480a677d47a536b41e974362d5d7e9dde2e6122b22d6f056b4e1aadbc6c776ce7c2d6248c66a3cecd2587e9595548661b86dd6bf356b2d28d1cee23813580b25cc863ef8da98cb8c59b120fb6c371474cde9bf971ed5366d51419da90edfec0b56127cb7d7d51d836cae2b1f83a3563f216bdc5bc69161d91af42441b22e76e80f91a51e8635296ed4e9ae26b627f298f973066eed623374d062ad3a67be902ac50a96634437b56db882a0f24d058213187c740260eff87ef41b5e987fd18c96acde50908eec87efd3e0ae75da18113f86f04b4dbbc197e6faee36121f2f03ddc995e07eab68a099931aa288ebc1c0889ffcd793a9d780b736b85d036bee14b5874c669e425540c7d2702e0c833a5ba8ff9b17b7f44b0612dcdd1321d8dc674a56c66abed9cf920c2b5c8eed06fadd784ebd0335e89c7916148b0f28e8a54e6881cdfc2a655246179a23ee328e436bc1890c821beefc4af0ca86923404ee9ba01cf42ceda8801e9a6507e4bca2528953dd9ddd6b4564ab7d18b9d40bb2f29e5f9a38bec25aeaf89f432ff295e496e2cf76ade09b5c9f65dc5807e1c1ac104d6f8593e9957504fea1cddd5add90506c277f0a2939955306d9fd9f5b83ca7c0a7e538de323aac21fddc9f181024c60c7ac17e593704345fd1d51bf336e96afc23fb7704f0fd78c17e5ea09633a1a4e81eb09f2c7cd8366181f8437aac37dcdd23136d38e311da09f3132a6caf29b6a2428c103d6a0de54e4f74deeb4da8142aa405653c634bc5fb288a62a310949aa03f8474b79cc4d578b14fa9ef9f970e28e747cd89d0f3dd0850f1a34056e70596e1ba5e5a3525d0fda0c1e1e7a40b0e65df62532f2c43c067126c050037c192d31808ca7deaa42eaeff832c9301751a949906d7ff030fcccc510fc3790c6984976bb8580169f0de5db4b21ca214916059fba0ce6e255d6624eda87adcd5424d24e5506d0100ee0aa8d0baf89c2535358a7c244496de98f6533f8c6a4c35890217876c4254dd3b9
8b707694b8b42c6705bedea7b125a5cee9eb280826b6a722eaf18e4180c5f7b30211d1e983a5055714defbbd2f225b1cc2219201dc1783aa8ef36d8bf5ee33ec0326a127d9a7fb7b50fda765e3a11092104d45e64e07ae4ce215bf5cb27ea333d1a37db8a105041805e1e2eba47135aff7abc33175aa373a1d3621756c82c186d62df1586d62281b93795adf3c3ae233ee701bae359589502b6b4b2fa6672cd2b4f3d064118a53e2beac5379d05a623cc95537fb14fa583bf0f03366a9651cd91e81f20aaf36098cbc0bc8c52c5073196ae40828e01a266e106c3da91ae60bb9571a610622715b1e5be5e0954bd948e441b1bca32823900324c4b8ad21ee701c4c0c6e4b3e9fb71683b9bf1a6e4cc2cb3a21bcfc5153f5835a309722e4567d7e66eebad6abd0d2e721e8513fc7f266c9d8230cced279e5f6615b25648e6ee5d71b9836fdb0cd27e99d9884f288cb3d64d7f6aba59ed5ca44b645c489a497363fe48e965a03c738771e2f3f180278ceeecb60d69c3391d445c0c2c2af8d9e72fe2973961a5c75cbcd1dd75ee3417900e70ebb4c6f625e5cce11e4a1e64644aa524156f5cc0966a1a07e0c82cca92a54f7549136f2858c66f5add0c90735ec9e8e8b89d99730242437cb565a79d3c850a43d50f4af8791bb68284e395e4d97ed183ee555fb81f871f6735dc5da96f8e9954335695f2f637fcb939250bd869374d5929ba298d8873c8f9687f22d4bd45184d96a0e26d178a1e413364d62e49f8381db668959f3b8676b8e5e08cb363f3a51b91d27738efd10fb897b1918895b75585174a272934ce2a8eca9d13019ea4b9abd86573fbebb45443ec9302a65d3594f6da39ded12ba4aad681cdc4e504ed7dc69d37a7f27836ee24bd1471884acc6e8194cb7d66e099e455ea72c211620f723ba7089be12fbcb93bdb9751d7718d2c63972afb3be135d83f413c1e1697fa3ee4a20177933973870b40e5bdb73ea8401e4846238306b904fd028dd7706c26909f96f1087fcbbe87aa053befab4b6ec8e07486dea54a4bc8c3c139b3d010fb6aed3563f2174f6eb46f8eea68109738ad14f381f519b39b45a50cfbc24fe84eef9ef99241ef53474561831faa89499dca951f662dadef973837ff6f72cd442563b801df3a5114aac4dd85403b0600698b09096d5c90daef2b1b84d7a1e9ccba1265d8893de06b1e8c642f95eb8dd26877fc21fb1c828aa8eff954553addbaf88f266b25b5ce2472c46a38eee97a52296bb6b66dde1823960da350e69f7672f8689b281266413f0932042b4fcf0b8a546292e492e45e2d94f8e4f0115130a9a464e2236b954875aecf6fe0925ecc1a010f490292bacb1f2bcb1790845c7dad561bed04a403340d1efe1cdb4d83e00bc688d5877b7b2ab52bb16
d11d03bb1e1190e9531f8a7380aaf2447f1f9f6bf1d389b05c897705f52bf215d1e421983d2bcaadd2064908f84d344d66ea470580248c704417aa2488736cbd72a7c0a436fb7b9e6f8aa76f8c9dd780d222a4652674f624fdf9a4b375c9568db0369f37d0a636568980ef3a411b49720096ba31b3b5604fb8437e007b092faae0eeaea1549745c629a92fda7195c5a28ed014d3452b472851638a0d042ad5eeb447ceff57204ba469dc870c2c31f7e2f35319666ca6c296383360fd56df57bab17ea26c438ed0eac012a4bbbb4f7fc0b7a0126243b676f89e4108c88e0dde8799006a68540c294d244b7de1eae3217e0cc6b5769800db8aea252114ecb17ca31ee3520281a5aa6885fcacb5c33ac32a68dd2555d527e611cab46e54cb514ef43a6c0b7b087246efb89fc7afb1336970479a64cfafca89d09565b03b1b260ae09a950d5828e14fc6439576a60c3f348ade4857a151b024f32a1f67318c67dbe9532a4767882331ceb6ad429bcceae5eaa93867244b2ee38515099b674a90643625b6ef54f3ad687476ef973d1fb7f92e00fcc3f572b6b913de830dcf0af8b1567758e1fc5fe7b529783d72b656fe29bf7704976ec4aaddb3968f6b8410cb204b600ae552206c929a1d589f76de8f00fdee424a39de97a162c84c17890327a9b0ccd96cf5f2784a9053512294a4b2a88dea2dcdcd98eebb1f33a26465f531986d1515f5704eb782723a3c757ecd5c0d6cafb75d8444cbb825275754fe373c4575668cf8249c028347d4372e4cfb57a408b2e4ab268fd1d14c2d34afd8a8dfed7428bf9d5678038fa1dfc9f47ed9c974a959fcb3965c20bd8f288c5b29e9322977792d6079487dbcafef61deff4001f82c8e1e31e030ff920bd58d69d9d3d0e5b0f3c9b8477d15354329bc485dbf280f51379cf8d604736d4777e720ce6ce8df339661c3e89b10341f40935c82ea1f5e0c336d859aed5e9de49f916c409a31434fd18a17b06b157b2cf8fd4b1bd715498dd8f5c57eb7328814b58686ce09d77da69566a6562a6bbf319c2ababc45c9f8a01a4834daaaedc77698a1758638cf4083dcbf0ef3e60544824bedd389ffa4c5d75a97ee56eb1a6a88155228800295e079e4717014431d2820867e46b25287842a7083e2a6a8561c26d206612babfdc34b1df9ca4a32a4f19b0f9d98b07b9d7a0dcd82edde6b0454e063345b637750da75a239a9341558e1e3540f961f46d8efebe216293e24401801ac65f82bc1b702b38995a3c69399645ff242bca25efd4a2e85330ef9023c8d1b44d9264532e647f1190730cf24abb1dd3b0dd567d4d3c2ab707ee74dcb1aa7d0eb7488ac711d325a1eb3475526c41f74ff8f91bd0ee5507d50349496792a2461fab14cdedfc222165d0438deea93bacf
5818a0af53cc2de188f28794a591b3a11289e2e0a201f54e30994aeee5b46ab5abda521777e8fb15a53472bf63dc526c8bd75d8514fba0c29da4a79e957a66ea77c37aeb7025f24b2eac7782356521827e60293d713cf6cad48cccc83bb9454dd22ba43fd23c87812e239701dff8c949d22d07209e94264b427688c9970d9bc9510a415ab58f08c86ac38d80ad0271635528757b07babb8c31cc0e1e9c99bf7a07d99d920835ce72e85ce5ed1602aacb778962e6aa93af14057efd051561f77996243b06c8da26fb46e696216620dcc9bae5abf202192d86a1b6d6c002225fceed3be9a4e2a9c611392245a6b15da0444a24f6f2a72210fa863ada30f7f27e98eedff1ac101c4f3b2d98bd09dc5947f51cc24039050ca52efcb2b5633a84bf1873733547f5b96106b74de88b88917be5faace63e906e3a6800bd41666be2c3ac79204286435fe7bf763de9ba28c966ca555e77e41ec831b1caa9cfae788a3cd7c05b7244349b22bd9424627352f8db369e2acf511907c88838a27b7fccd545623f5673f50dc794a6b7bdd4582198682a4f8bac58f5d3333d9cc205e447efd4630280c4083758e44a4a41bbf4c09deb7f48d3865b9138e51fa19a4423d1e40dbf7999e0883427b75e5fdc72e137d6d1a64938dcf1453559ce972c22a6e2f8fc4c8dca1c71cf25b6f442a0525613f6e3cc88068ca52b567dfc1e1ab19de44ec5562bbb47b4face30b46b1ec34c0370e0687a6aea111a887549c49227e9468309cf0330d68a72f7f2620eb0eac1380aaa277fbe86c516ff0c3731419cc103c535975b64114099769bef5e9fee5396a2063b11657a8968431b7831fe5161f8e006144d0a3a015367d0a9ae601cd52814efe1d8e3638a8a7c326017cef14c5bcbfddc1c96e7fa91d21a626ed1295ca149100219a6d5f7c3d1afa77fcb499bcc3223a5b32d2aafb367f467a0015380f00b1db790d0bc18beaa64cd409a0f94d7fc2fb02726ab95d4d61a4e0c940a835cc8ba7d25d466e399077c843fa033e1d600276034f0b61c31eb3a183784e112c89f2e8a64129ff5b4e09fc61d8d463d01b34557d45717db21e1d0ef85750abd487cc276c45a1f81ce06912896bf45fcc80345c27faaf0c63e303be7b44f001990ec3ab919cd8e7f78cc1cde691f4ce27a7632a983da200bc9d24e5870cb4712571e71f418bbafc40ea6555ce4da37eadf5efbeab4f6a280530b674346a5f0a0a90133e37072e5d94dc7d26642b152978b47685c7c973257045e3d0b6dd96cb5845d5be690794fc3b6f3f1867d1e646ce422e16c2da8258391314f51a91770600dd4faca5cf48ba1284c96ef164cde29e48ca17ede11561aceaf851ea2011586d27f93a00b020c9f91598cb6505e7697f2d114da19acd0d4078c2638
cfc8a871fb6e5ea61eafe9b2b83f5b32c7bfbeb99e5bd598464507678089ba1965e9920e2594aead6366309ee4b2e1d597362017d1868463ef38281aff30835c255b50d34ccf87afb4bce0045b81a39ededcd77789e083052c8e03a68a507053eced7bde2b0d5fd35f3081ebbdd3df2120d16c1dc892c06b6f19cfd32e4bbfebb0a985329ee1013ee96c3817ea8514311041847fa45af7d4750e3e17b666b343b92aa5078f9f544d45ed312b6500d124d86388bb7f4c36de5439d9983cb216945e0fd389a76d7b467d4a14414ddca775f694374784ab251a35c26646c14db98d85c7d8e2b318f60d643d879d5830993636a579a333742c20ecbc6049e1c7c8bd58f5009200c26707eb7acf5a4ebadf88946d3260ed5011e4dae9b7efdebbc2251a69462805ec71bd3667e094f618124fdc68dd7e38863e3077b9e07428067d7dba602ab4fab22c691359164ba1bc9b9b0ffd1fa946cd0d28aab6bb46e424b476b836e5f8f01346a7aeb2ed7f999d4d75d206412e318423b541968c2ef2af0a275b97dac972d6c367b93deaa204339d28cef59752850b616c26be87d3d4592154164b8c0a950b9dcea06069dd2d480433b1783b2e048eef04137443c1b77d980cbc81818d97ef6f06de604691b29ae55f0d749ac65668a024423803eccd533e015537ead53d2c8fbbd11376fe3509fd3fd52aef61c45dfda36983de690278b65ff7af994465b7fc815a597f660ba443b8a5daae31b5ae35ea808fa77e3c115a7abe067e9745be0a21d23aa2e22c54459b477f1ca016fef63fb2f22e7a7a7deee5879417eb7dd0ad763246e64c8de5ebe501264d7b46f200a0734e8ba38cae4f7870777ad15e278cd0a031df412d220458aa2e4bab93424f447c6a5987d96553e1d357c9ae57461d6f7f4a8e694371673a04e1463b523998d435cdd863b7cc8971b00b4e08e7bc6bf4926c3cf4670cd613794287316536e33ec5eafb793cd05be39f5ef9a9dbdcfa26d0efb24eeba5d4befccc6c12e5a97012453c7bb42991f705cb773a6c8ae8b440b29eebd8fa770d57ed28f05e452cebd5785ac71594f5a5b8e5338d0ccfce3092dadf7b093615c7276b408305fca2edf355a08015e5db91a5a0182788b0fe007a99bd808c36d501e33ede3f6c06b82db8a52e20a66ade5f7ea162ceeedd5748266661b9afae3c115495039bb7a903cf9649a5a13d299631c8957d966c18fecef06d7e8f73ab61412fe9dcf7ec29468ecf84256e1940d0e9df74d38f75fe248692d8f012793f0989c447e0b5d9653a5f03952389393556bcd55d93e24870cfa874bc530a04678f64f674ad029d8d0a50f7467a193b0499e6b7a823199915216e428e33b9fc6f3d7db6a0e0ba5d7e2690bbf489abe48c0fb85cae65f2de835f71ff
181f912bfe345518ec8f8251c47febac277bf8337d185df491dad9885076ccc9e5bdb1a69bf2c3163571bbbbacd994c051c551b4ae6074d911fa6a15dfa4429b4f23dd79ccf5ce290f5a1351b4dfaf7f12ae15b566989c00d34e255c9b05228f9ee6031fca417d71ac75996bba84438da99a9d1bbdabcb7cc0ee0c595dcb045c1b45115a8516c7eea6f7707d6bcadd18d0909f8d3956ce64a9028ff00458d2d415eb54276f63191b72ed2c023598255f962e3ab29bb5bca6636ac72502cbc1bce19237f0967395db40fc6e018301bbf3a5b9c3cc4d03c7d98aacf37d4e2ef6d8e5200598ce5a5b4725b169337318f2d036ecf4918a656d204efc2bae2db3073b4098bf7b2ddaafd87e2742063e63fe68da5b1a68655effa3e389e0387a3b658cef44e4c025d454d056d44bcfafffca0c5101213639324694db4ba7cd1fbb6adb983c714cec5b9a8c075dadea62def609f4eb500e676c2603ce356ffe2c816dd3a839c6e877f177d88559d077d3452591d25a5fa3946f2b0844a22d58027a3b0e7d0c275cab0638075ec17e8ce9e76e3789dba14c9905e21e3c0cf500bf913741785d3437167150e271a7d1b192bdafdb17d5324ace1fdd4a243614293172594ebc5765aa4efb42414adefbab07ea17b5decbc9cd8a35223ddcdf1254af01ec491f107e6302c17da3a570010b9ab49d7d803b8f533a17e32dfde8cbed1ecb2494e3ace8f19ab9eedf8e9fc2d9e600c8160024d9a53f4b2276cff3a43e6d9f53fc3212d95e90f2eb5e17a9b40be5bbb71b927577ae64ea796b5b4c7a52629065133bc179f100f9bcd61f74dcc3bc4673fa97b263af4a7ae0500856e832212a81a371055c27849cf658f765bbab2c7fdaa40cf1ff663318167b339e303832c48f7fe7b4833e8cdf75529a7815986cbd43c17f9214c89ff215564f69596061082838027157de5a4111ca21b92efa9bab7a692c4c0feac3bb7ff36d2603f4d501d09a3b1134b51b6257f164643490e4fbda1dff16bc9d20359dfd8ad59ae8a8d2d74495f0263092edbd8b9be2e5bc6ed913f2776a294025f0b1decf479376cc9662c5912102fd10705ffb8e40c92c9defc85d27e363b80f4eb90cc1fc65791e673f8e3230526e433cbaac95fc9a5a13e99180ee5dd3707f6970932bc6e0e224d94fe6b869ad86e495e235f2cbc860900b1dd031b9286208617756d0d1813e1daa66c2f3e733541820ac880058a99fa58f677eb626dfd3a8d4a504049a47c35e8ec14f1a737339cb710ce3a83cc001b84511baa9e1aae1b1df909839344dac908f4070e5f13b48e1fc1a5c045922a8f03b721d2368634890c137c1c16b837ba5c89ed8628dac3d6bb4d717eb26646a1c5f3d0d438add2b4a42a8d88ded1ddbfa6b5deed6e1a37e3b920954
2de006e261a13f734c8e4aa2fa31ed167b8ddcd91227737c406b344dd44ab844f9823bba1eeaf58145c7d8a6392bb02480109f4c3dc9e86a84675dc2de6104c1bf9424a4ee13b5fef7e81062d56415a29d999c80802859d307e213c3bfc904502dbe89796f67d1e68c090bac68f2655c8c49788820ec1a9ee5299e49cd1fce1a8751fa11226bc3898417fea48d969f9b1e6b33a079be36c0f6ef08682e2901edaf9c9216d1c5778e17b70a7307c93509ba2efb6b1def5a84cad094d865d5757d55234cbdd87d45f9efe9548d54b641dc6eaae8671498452e6f5954eb1b0021d349eaf7dfc2652153a6a7dcf603a392dbe359d119342043172cff2fcaa22f41e5e4b26cc034398b1fc9e60c57b8659339b4718aac5b1d9f9d47103b4fb0fc3660e0e2044943dec1ac4f6b504127e5ae77275cf287367c324081ab7e3a4405966a372ae32a09bf978322a79d7d1ce45aa22b14eb6e66d47b8c702b80d5692e6a7d380866f61e6589a080fbe720e20d10a636feb1ebc0996fc1c0fb6e2f83ea9bab7a6ab3a19880a05061e11d3ac66bc0e10cd43bd304e956acc8bdeba60bdab0572359e26ec9dcfd77f3e5f4287f88fa77479a2053e15fbd2bc0cd8f9a8294cafc55ed4820661b3046cc2e71fc081945585b60958ea67a69d8b55cc18b39ced4e96453f00aceff758a4403d762d78c634777ae8780d11311885e41a5837511e53e2321ee46f33ea553353bd825c4c66776e1b4a99b2e649445327911e7adb6a5f3a8a402be0c6c42a8019ae0c75d6b1aceeaf35227e79685bf799f5eda354d68768c8c1b7403adbe65577be3b233afa8f8ae0740c41483698388995b84a04338e702ade301f70e0e137ff503779f85f30d2d83410f041bbb608f429de659640b9bf03b86bc4ca6bfe74d0b24aecd0021c1bc3b048d7a6aa72fa4a3116578cf910c96828e647dfab2e7b1a88ffb9fcebfe6e3f2ecb499e61feaa0a87cc8271d9ca0857c2803b5ada76dbe1c163fa9454331e3292d6582c5c399f9cb5794b43ded257ce812b96fdc4c01362d45a2c26a4a4cbc230355fcdd70ba502894c7ed7c93b07e227988deaabe0f594e581ec63f60a7102c81d96723130ab4960e641b6281b04c538f7d37bddb39949aac4646c17a1455a592bd564f3608303162c395e961e0249f6badbc74b19b883e77480361c7b26d51deb88698f91266f0fee2eb30f38b9dbdf9b34c2de5fefc0d441a7510ae4488a70faa929fc938ad1d3116ae9847a94727dd607340fb2786b12898c980f3c5a1a1ec3a140598cc5df65e8f2cee0e9847440898c28507e215e29d4032e425e3c9140bb67e9a017c089cfcdf2cfc1cf74d000e30a40acca9dcb0beccb2ac6935c152de31a2d27b63b7fa4744e19a6865ef72594e98930650
d3174e58e3dca0c94dcf1ce33558f2d20eb6dc519431d823b4acbd22db19ef0ee8c99a9d3aa1799f25defe64a13661daa5b5a1d4be59a970ec87ac6533561be444d1492480067ef4b147bd535611b171a19cd6090ce73e248a9a1e34716466453278333e0493a840f89f81c27f0216ffab3cd5516a2da6dd97b7c2ba1ce83db582731fa703bf630116612562960a7c94322532abe9867ba4990138d207ad8b78f03ae49be80081b9e4489800817a4923d379f7ca03404d2ec75754ccb17e71fb6b9f507de00576f89274cc3ef0409ae012bf1f5018e17590db2542480a29f4e593913e1cb63b32787789751d1802b0cf6b2c5e9fa44223601e2b3fcb491ccea517bacf5236aa9ba871aef52af37c6963c39fd622dfa386e6ae0c36548395104a53b70\""; + +/// Hexadecimal string 65536 chars (32768 bytes) +pub const HEX_65536_CHARS: &str = "\"0x65536b37b273ac2f126b11185ef91c1cff07c9b5081fae332ef45c4e90dc0bad4868ee6713acc2eac2d7071fa5885f8b2a9988a07e01bc7de1ad0cb0f06a3467905a2e8723987bbc96c86ac4f1503479e1089d8d90cf4d836c4d12816a1a39dd8c379add349bc3f2f543a81051140483f61e81c3f18ff1c4049aee78e1c245846ec12f1b8392028c6a2e9e3fb110d20f0877110b2f266609a3d92f0cd8b59b7385cbd2e70417ee062ee356ca191f8c68aa68243f5ab62c5f4b237033c32ff7813405c8a4bea82f73380818dbd169cd1060f4f6791b96a402bc4dc83f6a6fde353e3a5de2e626706ae897b7da6b1e3522ed2f4b0f340378c70bfcaf10a7fc805d696822a76f4d8d2206a9a8aacc132dbe770ac870b9a406bde566ac665c942978e5edc1efada06bc13781315eea26f9977f58327a188a9051c6eaa00ca4e2e8e04f26f8dc2b589c399a6160c1345816f35e33b4a0db6d33322c3ccbf82cfc5c1b28be7da5d834d1720343c1bfd8b8964552bae373ed2a6b1b6f02766f679a2c9f1007610014cd71810e8117255b0a07e5e5e87711ac4713e13c6adc899350c4b35d9a22925bd46cf2cfe04fab2fee013e3560380de3b879b01a2e03347c784eba5b1f0367aa5a51cce5dd0cce8f983a1c0876887d679ca207faac11fff8e8a64c81ea02f0aa7ef2cbba80f75eb37a3975963b757f7fcc805adaf7611d729f1a47df9e85df5b2a2a8eed2b69d657a4513472ca6b6a44a1b695c8183dd178dcd621b2066ecccdc1fb88fea080735bb2edb104b88547ad6de9cea81a2afd04f245d1d15c92955648c248501a9b837ab549030860417f7ddff056eb73e90b1263923fd6e5ebba43ccd9f839817967df5c8a6b2b7fcdc62e4a55978c0baedb046059e4754aa043fc975d57c09ce6dcf56b1cdd24a85272fa
a1d821853f71b0bfbff2967f4b2dae326e4d990c3417f6c990559d25ff5b3d0b2573f49212bcf33d06fabd8a971f90060b396e178c6b52f55667958b1307460827b78884022187a85ea89cd2981f88365b5e2e66e0fb55926d7eed93edb19afdef4217f64a33d1c9318cc05cfe774e0f09428ec4a9dd38ac42a48751cefea3c9a8935ab04fdf7b24b834261def53839cb68a89cc61ebaab7047cca9d5727a86d79d5944737b0642e8770974cf2128c663d0133d9555cb4aa141e39dfc10c70e2fdfae954a7dfb39af8b7419909255d2d81fc069071fc29d1763c7579e729d87fd148a9bf26262a9332b2994a024adbb8f43ca76389fcead15a14ae24f005cfec16d4da950c8c1647b8e2357ab59be554f1faad237e492d6a5de11618817da26a611f122fe6c5f1ef9a826d3905ed4c8841f9f5e3431b994d1f4b62d10877c973aa1daf9614bbd55cdd865e36e25b9241c08e3b6627846113d6fdf4bdef35eacd48ca2d76959baa1484adb988262b38013926c388102bf0551c35616970f9d5fb1fded91a25e12f1f12afcce8295aea0ae7608163798af65025a84ef6e1f3d60a5d354d55e0fe408023894f68c2e523be63725b5cf33ace0828cf89a69ee428d073bca15014e7faa83014f1bbf6797ecb80bf75ae012fbd35b48f3d7e213d85c3ef991933021baaa7984e7c92073fcb720911ee736479e79eb55046d3ce4433b669c34e38c14b232402222788c3df8aeb42ee8b487c3cfe6eaece11d9b3aa681b143f9f360c7dc0f157d613c2b85ec407f6d85b1b1876c298c0ee50ecb9e0351738e6eac1e0e5749ab6a5f7aeb7fcb150af97b0c6b4b6df19dd0d06dd44764dfc10361e6217c8808b9b34b62311c5a93b4e6e0f7ef3ae52d4ca2689ffe94a04a4405442059bce9df2d919299f1bf75e74d5d51b963b19a0133e6904cc3c0310fb50a7c0c04c0e9a7d259c12c12cbf84dc72e3e460d74db9a2a3890d551b9451c84ec4a5e565c17692124a0ad25b47bad7cbf8c07ff8df7b40b3226309b783f939baeb02767c829bfe6bbf285b66b235b301372d8896521774d45d5c28ca6593b6f81f9aa9d1c74014fad0877f8134751210ecd6348555bb033d30a2d07a438b111c0ff0ebed28e22b81ddb4b9fc36cc4048afb3c876f62bba6f26b882cb2410d5f8e2895c35b90bd12010f424c64e53d33af47fe5657a2cd25e80b08e8a26274fdec89a6388dfe80bb6c48522115b6f0c78245e18b42a7f175ebf6816c4c45593a9f40dcbd881a1c9f8a9a010a886a319b6fe97e01347f6ebcb64d41687a08c869b0043c6cf684d11e75161c15f1037351c3b51f135c02935815fe9f5e382f4623a3e34ee42946e774c0717d6299a4c3720f26adcac65105771621ff851b47a1f2b96aa6a0d664021
e398de908b83d64719da4dc9c6d20304d00c9fe89433f6a787abd9c282212a1c087781c40f5a615fa8a31affac834cd19e98e7b26c115e6f48f397f74e12d1b214ae427f13fad70b62e051d3fe4f90fafa426bd12321e17fa97bb0fd211401db77ebd2ad2bc5586f9a84ca02af969b1bc4c7229db28e02bd5ca60aa60d3ce159dac61bbfdc6dbe3b58cad672bb64a2cf8e30093d2be04b0f84be835aec191fdfa517d671b2bfadd7a76d45b0dc99ac383a53e324d734bee7738f093a6714437cb70a4d93af9dfe71b930158216a5e19e237276f9ba536b3cf0e063fe7101edffa5827b38b5cb74abbbbecc823ad1df97f8b638eba5a75ae875dbcb1077530c06e56ebd5df1956fb93f3a605b8c1b69fe0cdc42fc6ecdc0794057b1c6e756ad5f6f105727b21074ab582e430ce12fbc1b234b4fe9b5ded642378fea32006d6ca4c539e07163c7fec74848db5d164d4f4f2619aad89f246bba33e7cee1bb434c3c4baa73448226e4711e0a13b433dbea7b474248fea8fb3290c6ae92e42dc4a365e6562381bb1d7e903336c8cfef2863d383fbc0b45f015b0b831636724d6390efe15617333cdce3efb5d0c6c99a9b77ac6f6bce8008e85ad4c5cad08aa9232c312f5df1dd55bea468587a58b0a5d966e37f85acfe260b26016e2c2ff7a5f0fe8c296e3c9f21a69067fbb828dda563ae945ff5bcdcff950b53b1890c37add5ef49c3d77c51f046e7d39f1ce013ff1f95b1ea5c66b37e7a244605ac3e8a5d33c521d7286f6bc22b7fe4076d080d8e66f8921772887233e6c883c5705862d2e4894efe63f475460a10f66a19fe50ff82a2b3c03084515bbcd25aaaf506c9fd90c4eaa150a24fabd3331220d83a48961cf9c4e9da384959935f79d84a2218348d70d394b522ec0787e934aaa472633c75c98909d91576a322198980c06f0ea370573ff2c2e2daa3f71f0f8d92eaf202fdda79d56bcfbd5dcfa435f8300c25a179338bc0d85f9ac297d283aa763892fcfb96167ad6d803eedca540bf05d9980531c135e553bf5c27442e228c761863ff43950e66526a1805e0671afa5ff0eb021fdf175fff6a92841d639b7c88bf7195a8f32b43871a648048ec0fda674cf79239865eda8dfed6d74968400a53a647b66e5b00276d088d2dc91b57f20fafc5103ced1a56ec7f6bc8e56f8bbe42bbd034ed6aeeba802e3f8ae05426758bee5b0e583e34e4df9632b636b79b467c84618ce7aa026dcd7d1ecfc54a87e6bd6aa5c3d5159693fed90a6b59ed65e0bb6d4c4a2ba045fb539652e7df48386bc2ed72e5c29f8d7e8e45233992fd18c473a39cc04fc8cffe0d70cbc0f75fdc7daa1d9f4ed0434732051968cc6123a5ac63e49a8984aface0787dfd84dd5f16573034c95a6b10b980bca6868c903a8a
6d4119acfd7c844c1bc5b6c86977e2de33440e4b31f472d6b7b58bf4394a079a529587d25f2ad0fa1bf22e196d859bb45b81788b5789384dad71b23d2a226d75e0679e1cfe9efb1c4d66463a42796877cfc9e5e2435b072d3f4453e23fdc6555d2cba5ebdcf4ad536b85d30286f3df49cb5972abad4019159adc68abfc34c04e46a5bdc182e622a08694db464760c5a4505021abe6b0dfe15c8eb5fa686fe0cf8a337dd14274884cafaffa3127ca9311de8cc04d19b0ac632ea8ba34c4308436cca4e25efe3796279f3e9da9f697448a9d4a0bcf4673f3e3e6a3d630d9ddae3b6a1ff95a545db4a5fad2f17bcc6c21be224b3a038e346d6e6db12ff12ccd6d573bba31807b31ae12056ad5b77cfc5047df67a48392389665ba2c1e28f9889b8448f0fb14c30ff2050ebca2546af586aeb5d83a785eaa54874521c5ac3daa5e9b365d620801e6384b6d72122dc72ede0ca03001af82d5650011abb62769a1a55ee09aed875fd706f14946248fb4b82d12548107db353b6f39a9da939acce756e7fb9c1866c2c00de1a79a82ee91b46122bb8f30d584619cb21a247cbffc8b7b9b09d76eb13bfa48c8f28ea335c9ba9230a030ae4a69fd7ec47a5eef8c5f9851bb1142092f173239dde5c059a728590f1741dc9f8aad1590501ba5616b7bc188d257009190356f968bbceacf77ff23f014f8310c9f0a89279d02c8d225754ce535000f79ec86455757319f65c4202df8831dad4c8e2b94a67c8949e46b733991f783d9368282bcc00475da4a1be720974d972544c810c5d789cba5e7b0935ed4f849e47207cf9d54342247a7702b8176a164bd7816fdb2bfd52f4c0d6e528b4080150d0eca5707dc5c1c220cb5c0508edc220d14e84b5386ba229b422558d49c9ed3dbe128058ca59aebdee15c9f040f10ef8d77345dcd95e2c500fa995330b6c0b2769c086d03049ef3efb98999265395a2c0ae3cec18c13d270bd91c1feb53588812942c9ad73f6d89eae4e84160abea950ad7da47e2c82dc9fb06d194642c2abf3af0b30e5d04ce6e028d0f522d4639cae0bfbcb2649d2e64a6a55f7437ffbbc65f8fd5992e59a98e98f0a1a83929ef10aceaf6319332d7ab660dd00e2c4b898fd25600161ccc24aee43a10eeaa62ccf5b95790deab87b88b49ec872f48c8713cd4597f6e55a556ab242ed843f866b64ac5b100916f0e4711a02bbfa9c42e47e5b4e72e84c28e27fbc7858d1d8791d600f29936680f470fc4badefbaf72f2ece3be6b85835b1bfddd8aa5c5b7a87f6e9c64ee9e76e836ee16efeaba2491865c24e9e0c5de706a567a54b77400bfaec3b2c9d6f43fedd6a13f713fea1474a6c3504ed17bd5a5e3ad57d807eec47d6e6ed65d8fcb4b9c0e7588680128b865677c38d7a2b8918cb764
62bdace3efa22217c64b05cbcc8d29039f438e2687e521078677e1a44678a441e31752d86f6da8a46f91766a40a03526cdafda3e69e636093d8b27eb3d12f235525e21f98483db3f9735d76f2d5bd5f1b4a945a367ee471def08f2f5a5af864772436a0d2f8da323824489dbdd65da331c9ecf8eededb058c563ba0b1d378109df5186fc7e100c0beb4651e4ef8de0f33cd0e49ddfd7efb84be4ebd71f778f26ad273af8b47aac27f103620eab42694fde562a68312b551afc2c592c7f7efaec178625c51eac8f8028687c953bb26151305c6ba22d44d7c6c9a48bd1542e0211ef0633ffc8bb0ac98b3aebef98fe710b4af01303b1feeaadbbd3d631f3e133c646a0853887ad851130dd2924e84da97191494b93fd711639c4aa54eb60341b67cc5f39fe6b8844dee0c70d38f729d4aa44553a161e9af54c667b9ed083a65451286dff52d9503e3f679b5fa38ce7adf78ef8558176a0fefffd45e8a402105d075b9d50e6aea8fc00dbd5db0d3e0259e0207b07ccddb1cb54c0c7d07699a9bb817246c86bc7e76b4726d2648b266508dfcce13b7390eba8b3b5629ee150b540fc032ac6fcf6a7e20d8a5dea3dbba99b8470ee035a991f2e2a7cabba89431308d6e64a7cf4e931572e937069659f88d36afa4c6e98cbbf12fa624b17cfa15118151bca7912380d4d66220f7b58fb3bb71f3af7861f553066a356bb4333d18e9795fb5a209155f38c7d0141364ec4f0de11a38d5efec4f592f6fca3388f5772527d9e216aa09d8f3be8591d0e4efd9fd19dba829032b524eeb123f27ac8695600658b034b0c5eb4a1166e952667e8441004f4e8de0d0cc9c4cb79fb5579b9d6f2ccb390baea7c9719a55beb0cb87b82314a0c0c8a0e66aae85fb39224ff7cbcb4e9f252de601c7e452713f736039b7d4e90d6495806ea3f2b129ceec6ae7ac401da17a839d8eb394c0c89e7b39b59ed2f5994659ce9baeaf3c1adbfa943afdd952223457ebcef5a3f9f6d19858fca3e61a78a755ebfaf2e57cdb369a8742161ecbc2fc4c5ec29b9537992e4221e14a7b02e9e63b3e5a6da6587a7ad2a4e6963190421a11879350a39c22c9cc12b9ecf7a3f9132b53e07cfdeda0db6e29ce5a82b347db645377183a02cb852feaffb44a57da36c02945bb803d057a094df43dc1b4ec751f4dc3be45bb3b5490822d790d4d67cb82e23e418327be8732cd2db2f2479651a9130620ed92b6166bb729b0e5a9bceee3d1c7a98e0dee9ce57911b1d1a1f3da7d1370414c19b1454c8da246835158fae4de3f882240345aec8174fd5268f5b73b60c602442e7fcee71531b0120368abe2ad3bb5ac2da6f7ed1a14c168ccffd580261d367327ad4834c524ec554ebc0c248be39d8c6503e4ed2e8df7b36d59ed1b6b9f5d8ac45
5696102a9f795ca0aa75a7e9c86aa2f3f5bbe0dea2b09b9f0afc708ca38152decba96419ddc4890f74faa7358824328d036453ac16ae420fd5f86c0ed8e3ca44b095f6d4429b534d7f877ca07657704d0c35a5a1fffaf12e12a51bf210dc557353fe27406c36a185d3da3f8013438345e1c28cfa98038883bcfb957e0dfb57ff33f8caf5d5b9edc596d26629559ab6f035ba941fa665337ea16422549c918221fe0b5602180c3ba3fc7c18c7e0afd471c0dca54bb3732ac40b90f9dcd5aaf665b0ca63292ddd7235b67892df8e965fca21a8e77c1ec7d444b6d2ce05bd2253b9cdaadef964813a03b99695bfb5a8aec508e98adc14d35f7daef7d9f384776565152c354deb30b98d0c0543ef080fe48495899862fc553a2d48e1b39fd2c699c7891100c01a89aef6ce38871e04330c8164d6ffa27efc4a4c7f282b5341a8f6f92c87eefcf0facfc9a6417f93df635b600c8c218e185522009e0cc730d30dfbf3f3f2c6b2b582a6298ee855d0d7df41b0005033788c81830fc09cb97e4115ef9713ad8b6b74787db30427237a48fe83126cc2b8d3d431a3deafd22d532e9bb9ec2e74dc0f4901c060cb215b57d07e0c7cacb6d3baede035322fe9c32f212b28f8318a87f73f603c293d516b475cd96eceb1739bb4081a2d2d82fa6a6d0f33231ef184316041e357efc6bd400b7228943b93d2eef303c5ef7d7295ca959f10ead921d6e257dad7160743bdc3db3c8fd87d26b2aca6ed0bd945f41caa574d9ed61f65a45f8ad7ec50f6d7160d05ebd4516d0c3da1eb4efc88f88c5ea244ed90c07239f3d217983d93295926d6d0c8597c1d6985dd9a0df8cae5b0af9718820bb0ab425d4f8b6b5d1a8831b69c695258d2e268deb2e8743a1cba722078458fb7d5b5f4fc0f6f07207a25ed4608b7f8b8ab7ace1eaee32460e5ae00d299f78389bde8f7a64b58116387e5f0e98522c6743e808d63d7d4d3596a74d2812780ec35a9715c8fc440826d37ab29a909cc0c244099453c4a08fe541099cbffb04ead771a5c7c7004c55c44990856f2e02e790e9e4a3db5cf4db2873ad82a97159c86aa8347f8dd8ae846a4cc42f1448046cfa0e121fb9a7787136502f5cc5a21b4ff3da68a5ccdcc54e49f825dabc00a10d437d39a2a9c2e5deb72ab8b188520671eaf1b0091ab7a271a1b6ad840d089f2f79d1ac7ef5e317775eee5b2153c550ccd0038050eb7b7a6789aa6bfe68eebaac4851d93a22af4a08a14e37ec02fb0f89edbf0c1d15905a6dbf691f0f0f9e96539aeccad6267f52574aa4df31ec79dc9fd6f27b5a26fbb1945528180cad006e68745288c3868d611257541af28f036cff9b911172e38272cd5fa693ab89a11e02d795a0c7b4bda53203027994d2dda4129b474a2d94f708cf2784044
3597d9cc6149ff11d6c747bcc070b4adb4ce4bce847ccb7d7f899d1c4cfcb6db49b87c029b468c71e2a32771364c16c2ec2e6d10d1dedfb3379310e32b3e16153609723855caad0cc183c99c9143953e16a22b832acc833a8cd3b64d448b76134e6cf42b4b68e920c8682387c64bbb4700d8b131c45fe76d6217f96dec776fe6b49d4a770513d8228fb9e8f51f3b8d086e1cce2c5a3af3f4acfd76d985147d7067a3637984f15ce82560c5e1304f25bb1720ebce8407cde58560943896b8a3ecc59cebeb77131b1d33ddc1000dec496650992b25e3b7b13b5ada9ffb8ce961cbe53f43496e95f9f49d860cae0eed145b2c5ed2163a6a06acc60affd3536cbc328eef9dd2c3d778ed42deab72462cd808f2f1cb4ee11376ab3e4e2b1d3b9e4cd061aaeec1c616573ca29d6561f8e675af33805c2d57d2e539fd646851f4fdd9cd00a863bebb6ddca04cae87b5078c7349d6467f8c96fd93f881e76b89d506b5be38cf916094dbd6ba046995de1e38e5874e9582a7920afb47a220e7925f83adb6c14b70ecb6c85e0f14ffc1e99b11e8e8d45a0c4cb7459a758f55a5359776277548a3cb8e806bc1df0238664b35fa2173827781cde2fa9ce98853a86e3bd0bc8f38a53eba0fafa306b97e5657eaa4e5d2325ff5f39a5308126e0a4bc23696bf2d6233d3fb7a2c83f3096f82613ea80c5e4c3a5d5983d0b637c62bba0e18002e8bbec09608403107d1dca13fd22cf5f936829de8d614d8cb04bf969d3a3b7d2a7bd9a572b2ce3e05f62220548e0d8e93cba0c187464ce909cb440f4051a3d58f16ab95185579f37d1e2f890cd730fcdb99f7bad003c4c71bcfdd430eb9dfcba3140c30d9663e6b94df246aa73c5acbff26b7b6c373a0cdf64ac1229f17815be0134509bb63e066f9259d3e7bbc524e95a16907c335d78f830c4b63e461efc80a0d1dd31b63ee14124c23373c4f41f61b1923f4526cfabc5f3598db634ebf07cb70061ea71417f8efa32a2ccbd67fe4943633ef76184e38f3ef594f5ee8e1c148b30637358e32b5031b80db19e982291b383ccf5658cf2398594c5b570a2a284f45b44f343b33c76dbfed74c8919bb7fb09b46c854e0acc2fdcfcd6bb2434ae40f91413fca079183e087ec16be83cf854fa55d3b63fff2ac24877bb1b3205b77c407bb70c04d55023e99eb036fa7dfab98735234fd73a2b4b0962f31fb89e207f5fc8b9d4dbcfa00a184331970379dcf55e0b184fa79f32a9cd7a3b39b2bb9f3a44d628d85abb7453ce94e22ccb14a379e2f0c5caa0deb484e8f8c455e97a1e16f84685300b88211aee0d6ac18965571fab8166e5bb1548d6b556a4808b86494102a8f547f58cc47140d0b8f39718e1a5f35842b2ef476e50eb08ea1202d6db96fdf41c080e6b4609d9
acf3114d4532076db372b4ec2c52f2b96a797896b2c1745b13012c3ce9e3e02eceaeb11a714539419b3ece70ffc777b3f633e406d56fd8f3d1feae707a1c9a64d527ece1d57fe78593df50b33227f35a3e31d8ef6777617569113460d7271aece80c344be8f5c139f567568a657d8b5bd5910c3e2f34e4fe5930d5dbf5eabe23ccd4c841a3cddfd39a287796844c95bbf6675567b1cc84feb5d3d4e514206dd4d4c2f90e296a04a8a30744618228efc696c40f7857d97e2ca25b868f236d04bcd20020b654a69180f67d43b5c34b626653abf25cbd364dd465dcb7d9dc1bb61bc3bff4afb9ff31f12ec68fa3e08a3fc91741edf8ec23f0a425482034b47022b74cd8ee32bdc1a4fe0a7e9e626315e1e6adc2686c6394a5520d7e918f5fff74e77a69295aaa00020b8337983e8af0cb58aebe4b486642cd5021c5998b5019cc64d93467c0daad184a7eaad2419755ef37c39de6c0d5905b8629ce6f31a1a447578248787af03d74350f25fa8f859750a98f94317d5915e4ad1b92ff45c440e809a1a612d962e2da2778677040884342e1a655559b1726be4ad5a6afe98ffedec7e4ffe0415b6578e3d4c206f9f9da50fad9d78eb94af03ecda8cc521db0cb49f9e334001e8d6dc7434d5cf0866a27e3c169004c2c722d0dc8843e29091d4c529c59c8d434eef4e967a52d38e86de94f27e61f9162ebee11e95f1cff57f31f943ad04c8e14870bee05454650be86aa4284abfc5fae87fface3d519be8a28571282ffe9869e16bf7eaa3fb491ce1f8e9a1a861a76cebe54283ae43f59d654319cf563ffb04663131c39adf1ca127c89d175880423f686823a9dc8f853912ae6acbf1d7ea0bd0af675760c7435eb4c99961cfe4d27b1d61752fe2d33ef19800c9966752b255693e692315f140a80f830be5d9f824d9136115284775106ed6a0c62bc59ddea4c1662fded15c5eda869f65f0c35f6d139ba8a8048379ea6a3c083e78f68f616d23b913100d71d5348d0b80d516288c9471100af56ce2b5eb5b310d7d8814fb0b7518377b8144ff5a84b5432dcbf931563aaca31d0239c321823fb3c28dd41a89354ce47d4ba95cd30d63dc673ebd03973eb03a7d85ea88d61a6f983e7fb4869591d26e2e818a1f77eef3bf277fa0cf386978ca5195001441a1c0579d78a49b1d5b007b7e2a4c80cc4a9721d6084dc91f6d21f6a10e775c07ca4ecf9a911015cbb63335484f7ef654818804d7474a4c8be35a899bc8a59847590606a07c7eb743244fe10948ff842c8cafa524aac50466b9a5f40461e7fd71950eb673306e477ec34aa9e0259885b128bb7d9c702b410d7d524dad6da5744054c8ca10fe8575375ebe645689dae51cb5142d3511e21a7b43840d0b42333bf558fd51a2870c4860982be9f53
355d8cf99ee8bc6836d4692195b0e21fab6280a87e6243097ecee16e9d578012f67f7a377464f4a5f2f222163388a817d0f2460000734e882ce7ebd4276b7e1a6a22fbcef87d65a9ace315ddfbd8e3a02984cc509312fbaf7f4e47a074569ace19f14010aea632670957b2e9d5ec789bef90e32914d3d43c67a4571da14108c2ea8b80af4d332c831ff000d02a98e4ace3dd037ae843a8647163a4e80cdef62e2536c822bc144e643e0f70dc0cbf0a0b54ccf5d188733eb05e87f92add63cf7ea85f91c4fde4e2dac2cbdf2f656cd01db7777ca4190e32ed439d66c18502decd0c8cc1533b263d0d747986c4e6a4aff70e2a84badf0295e5df47c1e35ca13d6bfbcfe7c794cee421e96b219af6b694ad0f24d6f76afc88c387903aca32ab3573eb56dc593f33ee9a910dcc3ed709f9dd830a2029376172e63c7138b757ab345f1de0accb21c2fb6a06b656ac2399c7f1f0d5fb3447c89128448c35e61c2032b4ea4a840119826f69ea73409ababcfdb04356f66429b29499ca72a1037f7a08fb2e6f265b4e743e68035ae0311b2ccb61aed3fe17dd22c77ab38c49b09184107fe012d43ab385a11171ed09929cf462649449b68a2a10587d2f6948df8dc509e05a2bbb1f16563a0b6ca9c8b40528f55d1ba56d68b3d39456c2ea71acbd06db6cfea521f51314aeb39444080a9b05aa99bff9fd5f2feeed3d3b9ed33936c2b1bb0c80cfe0a2bf6fecebfd4f2ba88d6bec458dd065b852d56bf61cdf98651790cf3063d310453ef7efd57c63ae348ae3686433387dff0595622d44ea5917e645d312a1175571338f4ef78c7292ca625d81de1166cddd0005f0332b4c8741c281a8bf6bfa08520622f74e19a7a43a26a8c3cb806009d362d5119c0182b427f52652ecf4c346195aceab37c5aed0e9fcbda7d4d3a7dd59dc3b10c7c5fcfc98ffda7b357327812d2e08492efb7c8dc6fd6f7b3c22193dde3ca229c1a902e9fac48c06bd4ae72560b3420dc772b0ad7fdae4d48f2b286eb5f85a1ec12c77a21eeade57dfff2326e46fe7de05f5b0004fdddba3adf099a3bf1a6ff5cf1fd56cacb8bf08c531237d5d769a1d9cf676dce2ca1309745aece9b341dbdb960b84c40f3f45d72408f2bdf5fb32403c5826da8af71d6319a0e6e9f282b578536cba20c2ed267df4d4ca2f49628df45b836b993232981e0bc4f121b80295847ce60cb0db41f67851d6ac64f683f7cf6f986c5fb22095183645fa87717325496930017920dbd74b0be6899036293809aa863466c271158211f3e26dd86a62bf4c159cb17bb5e303f50650b8776c5644988a896fd7fa1c43fb6f142f1451c6b20250612c0af43ed0c2570fea925070ba9699a6b7ed67e7b83bc1db2e213a5a63c96a2ab691cb38e90a435a8c6f638baa
9af40c2913fe0c1d516569acc2c27a57e13116a7ab0cd86152c9d972d332eac62f8c4ef9cff1de4c3e13cf03c3b9dc38c0e71069450443b5e07b51e7cce9b96dfe22b652f29541f0ebde8911fb56c2e6baf361ede39994d3679afb266734128094734dd29a0c2f90df70f30bcf45d0fcf461d1d22548d7fe807a571e690cf3bb71cceccdcd358bb66e42da73b8213770cbda83a50d1e4869545254cb4819f63da570b4b13c8545a095ee5d57bd74a84f70ab847d8d9478f232928a7335d8c36f5985514b81099ab32dbd5536f16c6074e2dc92b44e35aff65c9321e89f0f8b04864d595467c73c8dc618d747d01b3a62ebca9e7a4dd86ffd218e1cdf85ecc2ce300fd5787ee569367c97565a375c414ef65431ccff63866be5e47db305810c7df61727bd1c6b5dc37048656320eb71e3ba587d82b78a34ae20063e4f6598bf60b1d85c27d0fabcfaf9cc414013891e6f02d5953c42ca1c8b66dee0be7ea6a5e882a1d6d35ec67b6e6c0fdecdd7d5a418212ed30ee1cc9116fad5297cf60b1405b8943872197347a890766bd75b258c0cdb6c836dc62ed8581077f389e4f236093a2e530db8baac66a22ce78c5bc67ad6a928a5e666f59135ba35fdd6d8f96176eb7ea8d5d7e73cee10471ad348f89f8e17d1f7770b166253b986d9238e4014d6a1b2d7edb9c13c0779d4e0a11d09df3d2fc3facf27cf867189dc9c261bcecc301a51ac355dd08815807a171fa5d8a045b2fd37137b2f43dc630a4f426b53e01a59b88e3020160ca2c27c799d63b4591a8607cffd25ded698064761babb99103f2d8143192c5d9630cc62beb875e793778092f3046cd9f1b4bc83307a53cbbc36f9f1118053cbd5c06dca39edfd2512e7bfa16397cccd20fe4fb38c6f6ad66513d42be20a40e54cc0deb93feab18795e45bb5878663e0750ed3a4f2d6da17cfd85dda8943464efda1fff03205c84e9cb2a74bb26c938e6bbeeeb20ed2609cf795d6eab980a28c723a14eb809c553d9e5c5594aff1e96074649c1b8f4ea42f0d2dba40d0f07d251621297038119ae5a52c80cf001a332ce5f647d6c2d020ff621dbd5effa4a4be4b4cbd540a0313bec9acee45307429b582a5f71f71a6888bc4429f85beab1f6c11f3ddcf1561ad641e2571d17c77ff28ea33772d89aa6e8cd12c530168ac103a98421c299733ff4842ac944574ce3c9df87506daeac089eb8df22f45ceb8c1230be580bfab7d2a746071f28aba102e93c2e46e30760430ebb3e9883f2d09511f99331cadde5f96d18ec33f3fe2af12d46a3e4819ef95634241dbb23f0ada97759544579e0497f0f4e3077f1dd54a14ebae8b9d825a729e4b853f52b8d58832b6a20e56f27b85fcf38d39563f4afedfa6a5e7c58c47c417438e080463a8ea8db99529
7613db3be953e31fb570f54f9d90fe16c60fe2d75957701ba1940e98658616d5545d4deb230aab310daba86965131f7a6dc0f7d6c6e6a12cf2481f3095b822e52b54e2cde5e017081358ad1085acd94e434025b710ccf25f9f4423e6e4fc682ffe1479840d37466ff4771ce035855cda8a3e2907323fe7760dc4c39e317cea433c7a32fb587dd860683b08a9b8a568e95498b2107699f5e2192144fe52ba305240a70b8e9d60b56421d7e7edd141734706ff670abb57e1683b1c12b387df907f1982db60a0f206c5892b3b50d1daaf77194caecda12ec5fb1f89fafa5f749bbdacca43de4f7b6d418a9af0ad856e662a2ebb3fa6dc83a41609b1f7d5e90b53d9e8a77a73cf80f2984e3af010aa2bee35794712fd3502c9c58a81f7aaafab04f3265776d6bc82e79d0ec7e7da13fc57c79dc9e4f6ea29c2149fd57c6b878bbc3adb2aad4526dea40e8e30c76fd836d2bb825dbae2fbbe6ce6fb25c71a5cdadc37c0e1852a1b66d7451f90ea84b352c40a78cdddf18b637bfccfce077209c739ce802f85392568f0066f632e2cefc0f468aa965bce09c33713c25c4e0d5381157e7cd13f764cfaafffe0b47ec94b87b9146290719bb5156c44b97623754f59583d8b7f7450ec1b1d7efe8b8f5a09decbdaf790922805d7b06a29aab14cfbde149a79ab9890cb337087f0ab3f40b8abc1f2a5d3bf1aec8a7009a91c7b21548000d50ae6d2955bb9262787eda1fe5e0ec769a0c9a3bfec552fa05d6af6a10223fe4bcee0832475e5a3a38c823cbea745b87cc39d7d49defd664749e920843b801cfc14f558baad762f8796da716674e7ba1a4662b3392afeb24ec6e3a7e85f829d64bbeb812b021df0f8bc00b41660d106920b5ed28f3fffd8b59aff15f6cf514394d457d7ce7fb965b515004c8dcab97e3138b9daea3919f09af0ba2eb15fd37a75b29589f9e7456cab542d91268b8395fa90b8a2f5832f1f55cc4e0fe7c0051376cc1cd09b18b6a4674dff495bbef5869d148a967835c4adc996924e744e19a6e80456f06e30d0fe86a081f481e2aee593cbb63082d1110eae5267a5c6e8f38c766591f5de6ac8f58cba9aeec0be824b61a025bbb8efd53d1e59f6ea24a9408bebdef3b3cd5ebade03afc2dab103172f92ca4c9dae8759c44d67e0b442c00e4b81a50abba7d808c0b2f2bb9465318da6cd9bd89e3948d785b718d1d225c8ccac620bc9b31aa838e9272b4427ee3753dbacb7af3cc175063bb9e063a0570ece3c50fcb9468b7e9080feb5c64507ef91a79eaf0d70a0298f9e51d3a92b2591e3e6bf81b4c46aa5a64d620f1a8817d29d8d0deb2b373d094438a829d124aa966d8ce495dd5eb530989bbf7f4a2410928fd2687a5975bf9384c646d2480c92973f0e76cf8c126265371ea0
612548cbb21a2772366d43aff01a3a0fafbc609889431eea924b2fbd0b03c1990f7480bf48e55bc897cff83e2748c8628fde910b643b211c951a01b4e1953853a7602f01ffc05b58f8a8a63f9b1a1573a23342154807940ad3080c77f95cc3fc714cf427845cb2a02a801be971f980f9e20ff0333c3b326c510615c2a266e3e4479f6bb9476b29e9aa9ba2d18e5a47f2c1dc8ca2a2f68d57410f5514c37499f7e8aa634567f1f5f43a1c858ad964d4ab4b3fda048e98c2bb369a3d7c9bbe6d202450bbe4e6874d01ad3ade3070cc6435223911150230529e67998d86740ce1cb35530e4510f08b968b78e319c904e2f92187af2ae70900ea7faff3830180a72ff9a55636e6ea0d6be5ea7c8a9dd2cb4e67afd3c4baf5a9506f75173e5c8c556b77db2eba2b25f98db84d64302ad1952936eeaf8296379e14cbcfe0546ba40750e3d14d29d884d1fcb0538d90fad43f53c20694e19239e24f5adcaa000718d0819be166a2827a04eafcf04fe4a2f8eea90335fce30090601f5dac609543ce74671cc0b735cfb447002a2edb9b182cce199a57a186f6bd2e5ea5216b93e13e88543de802becd8c0e7b0de5d08dff22903024da169dc45c8c44d4cafbdf8ac7e3d77010dcd94d4a95f1be9322c8b99449a9c737d2812f8a6fde48043d51058633a0db37a64ec4936ab4e80fbe7168b3d44bb92e50d964a9d98d254a72657755790ac9ea9fadf5a221d5c1bb6241bbe6e9682d9f74910f8f3a37d5f597ce1c8ffe000ee97434a452be176171dc2017530ba5314a9cfc90f3fdb1dfe8c9df810b3ea1303892a9df04337d1728d0257f85b921c9e1b2d954a090e0e74cd016114f9e7b829a6d311cfac64aeb2a3cdd10a96bd99ac77faf29ed4fbcf93e81fad1a37bc72f474c650eda34c4150dd67bf9d9c028b65748f99dff24595483a5a5aa8f09886b51cc538aa8666669923b1b39ddacbd201c11b6368b1e15f60bd0620efa37608a8b797f7b275cbc8ae396128243944fbf9dbe14e00817ce4e374ae3c04ce65b31636d53531326108d77012400faf0b6fe6f311e923692d9ac42af0792ec8d7b47303d875e5a95fc355a0a68dd690f29ba2aed1193635f6ac0e428f8b6494402ebf4440ff409bf80fdf4049d292178e18473701e97638ad7e6958a951a457a0df68a1f0e9869c77cc535ea6e8b35fb4b94a30dba24bd9d316f5dbfeaf50832a1573f6965959bc18149d6f7971f567f035d491c7a51825d5b4b5cb72dba919720e6a46f81aacb8e352e5fc15844a173609e89a21a462676c6501fc66c2f57b041e8a1c83917f4a0e7cb890e64b754c4f6731544349c1217343a4c8a7495082ce57e5137a1c80d3d479eb26af5f4ab7452ea86ae729abd2b6ea4c7f22c1aba4d71d47cfeec2344aaed
fee4acabe07011807029a52aeb083436e4f4cb73320ef0d5196f6f1b2edea8fd93cae03e54997934e38e3a9b036b24d491f803799a0fece0cee98abced988ca5b32e179a3c8628269b18dbe3b2914d9710c4666cf28e11cf1bd296428875f25085e98ef1fadd60db3e3dba3cda144824723fbc449103634c5314954b5be07d0f2a3c3c45700c712bdf0142f317c2201e3a3670743f9bfe23f91f5b9e5fbebf6302a8faf6107eaa1820ee82584557da209796ab692d37f2bf7d2cbe192c25b2a2c2ff09648ba21bb3414ccc82a3b49f1fa945f51806b24ee50aa3e562ccdd374778f3ca98beb0459bb56f9359bff810aee72909db26fc5a25715ef07bced17999a010c78226b3f8bbbcade996bfa7f9eb312113b84cb1d65cd0a52746d2c7a89b109783202eb2aa14bc7a1a46fa225b69d1992838338c6f5e04f0858d026bc7968a632d3fb345f39c2dd850931554c7f8950246134b9cd5f4f252a0d22d958a072c61d25d5ec54cb9242259f444c5e7e536f0c32fb716993fa7965fc98e080970a36a08dc4eff9f85b77ebd468cdcd2e72d0a2a021b0a853bcc033ffc48bb68dff288828e46310dec368b860853a6c19069db8634486a6f545744ce83cbb1dd2c5f2d2868aae84f1ad0fda0aafe11f0fc0acddabe4877896d26c90d6542692b80c65395f69132f0d2fbf7cd0ead779a468ae9f617eaad77ba25db0acd78dfa2b2005aa8e8273a364cf2559e955efb36db928aef178a4d4184007d5ca5c7ec611016a355ba79fbefab86a123fb97f53b0eb32cfed5ace50a458f83644cf40b91066118343e605a1f6d3d5ba3efe9e0f2250d5bb5257ea2fe097a7a6a3f80fcc6702956e79474e6bb0ad3564e621fbc320437499ae7c72a2384664a44a01a06106bbf02d9c1953ca2d2d61c95a1d6038b11635bddde4ebbfc5b419e3058ba0b4b6d9b165becb17b64d4cc450b31ab20964d26ee2352085758f5851d41f754d8a8f8a920ea88631b61f69d60146a2167a704129a58fd8fa6165c162820611a8906d1688ace79bf9e66512cfd1484c096ce50388e602ed43cfdf67cc63847f3aa17dc2db6b7002ca597de8170c5c45011b43fcd529de06cc294bcbd8d73220c46770c5a9701628669d049a83e764855c5d32d40e646989d7dd84266a7d280eebaaa8d47e470634732d59e9ab4297eb3f07578b53f3531adc1391cb47dbfa75fdc02cdeffacc061deefbb7f0ec9f0f96e8ce0496e5abf5d87798a699e416175058bd093e97c1773b5616aaf56ff4ca5c26fb191c5ae41d4c2f361fa60b6d554b3480aec44c9fe477d2f0f7a45d7c6629344e67c93f33afe231625e5aebd5f81541f8218052a577b9ad7f6b72ac5723585e6de4ea90a2872b2b951df89ac3939cd87c91200b254d574c7f7c
b55af7c9b278b4d6b201b12f0b94ae06cbb91007bef89368b73b0ae8020f955017c5d46c291f0fe20fbdba1739ea4a331803c6fe7bff876f89949cd5da5e2e7eee764b3c7c45e8ae0292753ca8ab2ae631fd8438d349a55beb9bdbe841e6bbe7882bcdb488470b0e55e6eecb0124ad3fd3db5a1376f11969b8287456d3d44f9a32dd474999da8f7ba73dabaeddbd2c810cc63006d8e5dd154a09b9e814997ea719de328d4e4eb6ce1ddf1bddd6e9d7caaf9b386955a8fe142d010e86532ecb06b3685c0a69161f2627a8333068c7ce52140eef125b42b36bf27f74070d192ba8936aa587b9d398446c2261fb965280bd6bc56309184181df312ff673c4335afb8a9e2140a1ed9832d14cb05c8f77ea84f3f39d7516707ff801d070e9f425c138f06929f5fe233ae282b20085c6cfd5458c7d08c24966692af215c5693f9e09861c01d9b0f860dd4b582c44b4a73459741a15e0edb6e3022391b79d5caccf5386b635829f3208a8158ad8b7a396f6890d0f4cbfbc61a2624c9ff55d45ebde3ac5c5f07c9c043d015c249bf7b40262997f5c4e93716f5e381b544e4cbf76729fb813e98333642f7d68cb6caee49668824a79f2d59cf73363b3aab88b4a6640547e9beb87cd78b6e056197ff9871fa1dc4f38d05f21a6d156011761b90a605015d45a834c13ff17854037e72af3a17de04b9deefc24c59768c8a615231b6f3a0ab37d69ad4a1ab4318803c69a3423f70ef6ebae7c80be230a4c64ff3b7ee3c29c6eec97370868294bd185ce576781e8dbbb69138f9569e4c638650301017285f5bcc2f606b39222130456ce7c2608366f51cbbdbe36eaf3448d841d4021c2bccf17c6de26fd1a2a36fe4477d9a966e34cf035b9f1490b7cd0161cfe4a8afc343dc85119e5ced8c7757206d6bfbe673677dd099007e05d031b53fac544405a942f3654502ae43cf1f882d7c562841daa43f96e062437eee30a993c488ec4d501ebe4ae0019344c8d9ba892d2e1ae84c07e24e9ac59852a15be0425881742c7c83caa026da718596830ed3ede80bea26565d24a529fc04ba53f5efbe355b36451ba4d33d7f7b12e1a09081838682308bf2c5a3b7597870268591f34fac0691ca37901fea0afd16dadd5cd3e2c28d2f265a4fc4f027289278184b6806e95fced0d23912b9ac4f76225b96bfff99aa953e779976043dbed61175db8b9373b21343de3b2e8a367e32197cc198a8cdba743210cc7a42971a10966a4c990088df071f1ecc96db33ec361bbe98c825ca3575b0f23a8294ea969cc8dbddab551a450655e488341c0541d8d189ad94b5fd67d052554cad0ffbc8d020a4110209bcde2e4bd6d461af522794470be1d268ff633f1317459eeeac3717e72bf9da0f4823dd4e2981c46655c5ae2dd213e
329c5b151e9fa973396db3cf3d79e5362c6e63190c1fa936d95b98d81411aab1da95ecebae94d4c113fd49dc269eaf9033b6ec49c13201c14f7dcec4b29a146056800d4eff9a0c979f6a381de3f69f6aed2c5828cb82ded7db52ecd793b753205feec5a48f61b714c151aefb9944e254c43045a6c5e9c002c79f2bd42a17b713f23b5de9aa788f87e4266dc70ed569ef6e68ac621acfb6814f7869887fe266258bc82b29a70d391922a2713c31071bf73da0b4843b8e1f5a7f3f3f2aeaece946a819f00e34e0e2d98fc32580e79cc237647de6126a451102b1e8c5cc9bb2a3c01f0afdffa4005c60aa869011d4dd3eaa4a7bab937bfa7f82518150ae35fa9ce24bf829c197b792fc0994c7ebfb42a2f9a93f4c1892e4c25f662dd8248d67aa4ed9afa83ce9326dee5a20cfd879fafe9573b472817e74efc65e8ecf5f47199c9737332f634a05b3354cfe9340d12460f9846404e1b406c911db86c6678fb04a3317a645b1993e5e61bdcc0f822ee280bae9d84db191051a9958f22740cf0894555de3cf136f4cc8eb1c4ac44a8a9822f95a15fd6f80bd37252677a2cd10d703f9294855d169242679002d3b322656b4ab8eed68e404d79614fd4d91816c20a390341703e68e3ee241e41739418373d0fb9ba1fa6a0366b0cc0a2e9dbdd983188f8c0400dfb354b81f71817cd185526793fd68683952ca58763933e76fb7f1020df7f59bb35b63d41d14cd157a24fa3e4248b5cde6286519e8f437a086e9191b1faf82367faa4486978ba6f1810dbe877e5e33977dba474dbe70a3c4d4671b3f63e67e1db5281b1e0d0e0f1a66b1397e0e7726c4c374840a5fbbcbb98b6952fc211261def9275eec50e83729df2399756d6162cc2ca4e317ac25801e89b8f18974e47a6c3fdfc263181a689aab050f6866485b67fc445f037294f95592bd75c454a17df0a95a7e2a30c89a9bb2091102afe444d98002f36c65be3c1f79fb6665ae408e8744ebd1c399d0c95b20b21f06f7ec00f86554ccae8171ffc4698173464551795a77ce540ffe692450ed264e99b64dcd4deb6665c99d906b8b193ff1a1960d1fe4417660de1fe4f7df440b46effcc4ce95944bf2ab5ee1aa227d2cf5f513c191f6d5baa23d5f4f759e9404a0c1c86236e72d1bea4aed258bc652bd97ed8a226ad6e50a61918b3201406099938ba0d528d9bc6ceb36df2f7ba6c6538678a52db407d9272e32b13eadb152dd4b8ea3d384de54f57c33354c9123be7b8bc8a770ac5338c912c1539124449f627d105aba2102a4b0b67b0c65221cf18203e77f49027d0ab585b8bfc6861227b7752342921b1c00ee5a8742c089f201a190b19e41d3740e919a808e1ce8d3591a7beb851d81b8a55d61586651d229c1650f5ae4d2dca791c1b6d3cb
2ba7059fcdb19dee84e1ad21ed8f12bf460be2dfd201a9fec9f0ba555af60262073d1910898bd32ff43a6cc844a9eceaf111208c959f48b03fd34393fa60ebcd35c973c9bc61a27bfc3f6e5c4b0ba005d1605542ce249fff70a03097330181444e4ce33c130517551acba9a693226e84a5edba5da80a3fb530bbc8bb5999739dee9c696a9ee1c7198f4cbe9635e007afe30cb77c1315a02429e226e3727146074c38e043c4770481cf4d7d321421ced4bc44cf359fab27bb5add0ebc1f02d1de646c157aa0a2fa053f76c5c17dc0e21271d3eca7ea2b4db478f23c052e106a734f93c5b3ca78de70c8c549dd411b5944ed9f18854b4b1ea1acf5a8780163a1fcf2104cb4c483fd17e8f759880a84c0e4cf5f8304d6e4389237afa8c93650b27e0319ac90f660782f92afab62e21fff102bb8d316eee157c2096ae69ea4cb3234c3894fbde89aa3e8cb18819f09ab8961d91d33901b0fb8234142d6692f6f38298534088a4c66b78760ac4cef690e6e67ef1c25bc8d010a0d12ce5af512bd2cf723bc65aa592ed8af781767b89cf66584553ea49cc8bca17efbf881a2bb424add154ff538c41bdccd485e625ea495c8ee50f6aca3a1bb285905295dcfe15b564a649af905cef87ab0c018facbcc04002f9903c8ee91de5e455eb4c35c1f0b1ebd65ed2c79fa3aa1a3ad8fdde6949c21bcb0489310021dd0ff77bd3a54e4e3b445547cec1ffd8883c38629b4493d0317093e90d51903c814885d75af75d2afd96792430eea0b65b981305cb684cbe871f4c5b5de566a86d2982b6ba8643116f50e988b228a460de65d9ac360736fb2b5052f51014142b0a9e2b01c36bd85182942ebf39943787b4b19f33effde6a4b78f137e41967cc89d4ee64e44d67ab1dc84dd30e65c51866217d0819ede95aeffafaa14642a6a8ce8ed24a8abdda1f5972db5f0bb9649440d298dc68e37ea3175044e099297e0bd5d62a9baa8be949ac6005a39105ab710042110c78a487ce7c87a2eb18341b7477b540fca6b0fbae9550aef51b759b9993e3485535e15799c473c5d71b1f42f1f460b9bf9467ac4548d5119444d721349a122314eb746e50915a4463efe65931bd505cd5883f0d6e4a2b4142da4c61326649baa5f679e8a4da39ead03b18235e148dc5bd8c6cd35c616bf1ae2f6f781baab1f4cf5f618ab074b774f2246830822a196c16053ff2a97bf9a1a04b83544abaa2c40cc2b3c3c578c00cad70dddd0ee284160987387de74bdc1198d083e814ceae60b4815bcc8677bc2999818615a872ebe40100a6c6bc0224c0195ff0b464e2c7ab6e828b977f5019e3cc6afd85cf39ad89fd1f73186d57d7b111e7ecba67edeec09dda5a22a968c9315df130d07863488ad7d72812f23a3d10a2d4295f6ed0e18a
861c01bd621bf7090617e92b18e32cb91cf35741ecb3084b7f7b7bbaa2e358be52f2c197e1d6a8acc294c7c43f059178c1ac2ff2fc5ae567651350d4a87284f7fa8c7c31b9c2e8ce78d70205ffd1a869fc63c228d3d7973ff6f757abe1aa7c71f3e8624b8ed2c1dcd0643535eb15e73dea20e3ff98c8ddabdbe2fb6c346c4211849b993f27973dc6143a86228bfb2b5e2b94db481bc8d63c61b0e6115ec4b75799bb3fcc8bf5ffd7e0287a8221eb5388d05ec69fabee5ad152347ff9ddfc352838e139f791970773d32170afce03ba5d306042bc4e3a9c9d447ca0d93672bec897ff1b59f3a91e7f769f0cf7d1afe5af0266c48f486997722470555f59015a6e9b74a32019aab26bebe79b86bdf6511c434889ecf5d8766fd798f9ae4f10acdf4810add185d82345b0c469eed4f967a5ae662859127b22b1f0bad7a32196b6f941d3a2aba1641af14e074d19dae0e024b5defdf98064de910758b275eb0d39349a9aef6b2b9b9e9ca3d5a32d771872fa0c6ae9e43f39bbdfd36b9c017ac31be13f98bf47da6c367b92bfc8532bce28d6d9762fd56919c033378d0827ae8685a4ca04e3482fc1a3a023e103e2924cc4d5b897e23afcbb8f09ba3586ac455f95f4ffdb99e5fc93ce6aec0d673aa6b08de5926a19eb9a7de0ac787881bbe67a10690020006133c407293fb99695e491d26bd254354c22e08e3fdb9fc7b828f446c7538fd631d5ddc9992dde920bec9457ff2021bf0ca34f87beb6235f42e3606a48b183ea3c30dfa04ee94b11969a308ae876011826eac0a64ed655e84e136da48efd9d4682ca0e41dba225b19272b3212b98fd0dd1ee708aaf2c16202252fba04d8d9a960103d431b8e037b6d9b97aad6735fa563600edf95e8cee2bf5d0977538db048c7dc011582cbf4d7f94df5b39928f015538f76acd23db98b72cc785554523aea0af7eba88041df512957cea80ee6df20da80aaf0f76e850b4d5ad7fb780a37a34f695c8dbb96f1c03dc28cf6c07e803412d3c0cfea9e94e300334e8b6d94ecf53e0811238c0417399ba16309ab6fbfd99f7574c552911c05ab03dd301a47daed3fbe186bad39a262d8edbde91902aa96169eb53c5f7506cf14671e5faff0e6f3c33932c9374486e727858fe5c6a8bcfe80ac5672bf1f62cbb50ea9025a48cdeab0abfac0dc1edb00cd8fd3e92d19e042ab6e319561c26b1d5fae34b408a926941e4defc5655b193e1538db3e88e657d0e15f5475e51b039e85c7ef373b7a9fde3d9aa569fb5965986049fce25a73449b953ab590071979a479fbb6b8f44f03c8a9831e09efbf4138dad207e5ae666ac169682300115e04907e090e27deb33992f89cb3b4e460f7a7c577bbdf749f2707ba42a8263496a56922cb7fa2d97a72753a98db9165d
e7083ddca4af6f640d1fa6430e47ff43c22f87206247343bb0d365a77d459edd065ea228127ce1a1c56fb535e109fb0ef6562d04459b2ee1b7f8ac7666eb3a44ceb809419d0cbe0c4ecfc274d4a3ee511e1060b3b7948c86bd3c9effb965d5429c099df9b14c9e0db340c7090ec8ee9356c8bc8a6b8b4315b9092051d69b9ec86715edfe305eda73d1188b8426bdaa49329fcbf916eb555e23aea4b57b9c674ecc0f3107ed0b7c41008a61d0b77af5f5dffaa015ddd623e33ea020c0f8f5007d2bbf76fbd94ef312bc27b77d69b730e4eb5e4686e2e4950ce7a2084ca361d93c54e0b0b4ca9eed6710f1a2dde8875b877c226d2d10b828e7e346aa2bac738d1dda914d2a8089eb012a9ce5359ff74561150f7202ccbf5016fcecfa22f1489631524a022332806f7e9daf39fb8b774d4d3490107c2a4d27546d7cb2270b5804d01c3371aa06a3d180f0b4df11d23a6545157c1eeaf1edf0a02f492d45a639aa6e9d50246b966a2eec5053d40bde13b502a2f86f384ebd94d6e4f69cbba9ad9a126e2a0eb6dd84d129e9c5d2cd5763d05420e283170e73be17bfc612941e1b6b035f20d1ccf010faf6188ef76068b908fe5519c772bcefbbabcb7ef717e9b906a62a0b00ec56bbc0fcd72f08f3ae1a257dc847874f420a42d73bea2fac9900baa9dc87b3cfd979fd4b20d754b0ec92b8afd19523a2c46f913febd7489a156ad0c69c26a0472db960cb7f73a020e202294f29ea25eb51de2d4f72a2b9b7f9d1a379492e98c9514e77b5b516f32b974f67ea7a33398467e33e125db8994913ec7721c66a31407fdd147f6275f768bb61f1288e6318610f2a471ad84e88230363edf13c7173da724656392e8c08cc53729080ced477ff76cac77e2985d38272e06caa793d60d14d5df6d051699deb8a51b7749b20e0115e648a87c8b44c8b26d7b8f6447097b0566ce388a30087c1050fb2475d8540813a0c48e902419e37524d0744b92d3f14081fac0a101232565bf45b70cd90474fb38bdd6f48c9a46c523524a921c4eaba009e37655e3529a15a16dec68fa4baf62c51b6c670d39fd0a769f4714311d0e63ec0c5701fef71e22c29614b3ce509ed448bfdde4648cd32b98747e93e2cc668f83aece521ac4306960bd04241c2d5e9bfec9b8160603733f3d015e7fa2527ee300bf6de1c2c9fb0dcc3611941dddf5c8129b27f09801169b0fb1913bb7748e7193f87550479d6b7ead333d8da7e98feb892cf92b2ef450335bd4d2eb203c698b59d862ce9096c030eccc5885835f37fe2aef3184c3b75b2b9b3803b526bb32e0194d812eede8bfabb3abb84c66d8c6450b08e5009647749da4891b4e93e5dfc5de09b546cb2a80b9cf2701d1ca4d15bc5da86caeca4ab339b179d9e497db1450d4681d3
2da23c5f3b8c24a9f86ac1d0df434821823c47d8483ec86b011ef6a219cfa14b74c2064f029d636d8392494fe106979e179eb7ce234a4dba1551cf447309efa5ad1f70556ae08bc777714d586578effae2cca477e2ce61b227f5e2d5241e493d0b2379b621219faf8d8be44ea9ef4ebb8b6aad01440d0e9b10c964066e849128334566f5dc39724a8ca22d8d057cf32c7916ad2870c5eda332f87fb65e781b419a10fbe49630d47f1b115545309a5390e505702728ed43936c42d1885df2f9bb06cc834dd68d42175d9289a91879678ae06032bd324500e51f0ee40b14d1cf6d940a5e0850d3b202685963b8035e7ada7bcb80dc524d19188c2160f8539edf53d2fb0567ebd56a1a2f7c43bf7de5532a50b9afb27f43ea631ce59367a3274ab8607452f9811f4400dfde9cb049b2a475c8943f4f83e0600470b4f6ce177e2198c8fe569d2bae0a1d0b139211d741024b26f3a9916ea3c0b704b36c93275a4991dec8d91a541ce27b332183a4a1fc372dd34bd6b876cd1524e6600b7db242a799f20837376307110d7454e187b6c56d62a574e51e9e5f95e81e3ef5fa42c3518bb3c42ce4d49fb3395c42c2d44bd829594566f4a12e1ff8c4ecc5c34aad3f1a8721ce44d3b696863434948aa4770927401e4b213842605e41a3412a73158d51586e5311d5314de755737d4a0cafad4b8eba58b552ba767e4462ab4a3bf2b82d952bcbfb28cd2746b65ed92935abb4fafe2e7603317dadf8fdeeb6b46c97ee02e5727b8bb31ee910f86f45add2098d6a6b0fb2bcbc12e7b8ea5ea851984dacaad187134e5163013745e85e19947b2ef56735d9b461ed5ddda0e538b1239bde55ba16bed3b1dc453e4148d89fcc32288fbebd90b7e1daf6ad6ece865da477c8658075866b5c4994e8d0632391d8c6ebe0efe37f9c439582cb28824c2aad6548342eff83fda4d7d90cca7955c66b2ae0274ef207c2fa6fb7504ef4871a5df25bf001b8adf8dfb19dc19ab03fe960cbe6f44b93418ba8ab173c075405cc6d28000f22f6c7cfe4063e7aca85914e30ef32ceb78709e5134030fd91b142c2ad6f00ff43bd5e9804b6571aaa1d57777a059f080d47c6f38ecc9dd1d1f1a593107c91d9f24eb60f5e22b58ba06e7f032e4d66ba8fa89ae8eb539d0ed255409b0d4b0a3681f1dc4f5a519211c8efc83186a8847c9913756ab7d0614fc275320f1e8312e204e6043a11af78587f65a0cf95d55d7aa10bf3b7bec83fe9ffadfd9f6c0b7d5e01213bf5a01142a4c25bbbd865480da06eeb7b7db6943d40b60b731d5857561556d87a5288ed6b575d80b60db8c0ef59c1cb3bb43c62a35da0ca1a5fdacc0f1d5b2bd59ddeb3f4411df1e5d813f78a5ca0eabafee8c7e1a0b55b8067c9f784d9910b8d22d8224b90d8
9c0ce85dd2c5c2882c213d7b8a3d07c512e572c8a9696e0e8e028ea45572196fd2c8185f6ba4077e87051020fc197b21b69774d6be94b6e50d7ea64ee11515ab61fc64e6f7ec85446e7c1033fdacedf6a627e3b29d8136f45ee98f445df5680b953daffd7b625d98afafcf8b590b67aed14f8b7c50af82c8387790ea496c9ebf781ce6e31347b5b9970ee98ea734ba7f8a1a1638fee252cb38afcfd435c9acba935b15dbbac6f3197f09d4a470d9b36f3a6f27cb73d4b7e840cdc0593074bb3a07881806e614767145003acf8c8336d6827b082a23bd71fcb7665e4b4351b5b86fe7167d89fd4a93480c8807658796dcb6edf799939dad35645b23e73ace82096c83a1f92c985c85ee9c963a9b4f9e6659d3f426e00ac8bd31d9cb649f6da7a5553b2f4303697ad1bd765759aa0ed9111a7c92963d4b6f09dc60c2d71b830725b8f6c932e917a0e122372a2cb1673408a6c97ffad46e71fe989cb3f377bb9906ae8c2528a6a0bdc4bd4c2394a9b165a79b9796d769dda30a4e5e214ecac12b45912b38e367e571f2816b5be12554f19934c0e95b94532d854d8100f26d26cc4686b82e5663efda19d69c39f1f8df964f52139ddf90f5a3f1acec7f5419b1d7b5bed7601a37fa6b26d98058c60cb6da1aa740d496e08fe23222321dafc8b2ef5dafe7207a57cd2fe009eff2518b1d9dde687a687139d184723b03f6120c1ffc62fc96f243971418a10003242b9ff8c297ef036d5f014622d5e377d18eeab535043d37fda41d58656c370bcc862527040d79650060b8c54388bd4cd1b2928f1c5aef361671ed44aa181b5fc7810481d8eecdb4947c02b126fa260e3f0c65446427135c9b7d5902ed16093a750188a454d594180b968e26c21fef0984fc1be54ca52bc141d99d9f181d0691e0970dd5a28c2882085db6cf7bfd0c5573c978f938214eb2d52c499bdf9a89b8409247df73b903ce48de5f7cd945ab6be939d157cb7ca6939054940b5dfdb32248ad51323d1742e31dce87abd534bada2b7bdf2bc45927c1696eb8fa6594199014f5cd7b07b3f67c1025435518bc2aec0836925d0e94ec05ef11cd5cac5360b2ec6718f9ef989848f0578cbb0070daffd728b0ef14a95bda563173c988a81d03993b69823404ac4d56dceabf1bbbee8147675140897dc12213ce461b48218ea733f121a1322f58d0223acd6a90de5c67c10f329b5685d17d3a89c22b94f778df6d4c9ef6bac8232d22d3315e234d852ef2e610ab99fd4393da933e9cf51073b5f3d590b5ceb456b12096965598290a50749826a65e2f965cbccd3b7151e0e14489b8836cb3a5d389f42d5fb53e3677d14ac4b8bfa239f5d0412781adc3d0372c7a04aaa04b66bdc26315138da0306b387ae992e575d098e65bfb1e9939f9
bd1599eb9f1f82a9e0152ae8d39c9231db5f1eec0e7eaa6816c96b0ad41fcc5c4ae38fd91fbc986fd9fbd489690e7a2bc5cb7ccb02da6bd99e6c4bc5dc5f40eb863eaae35ff9d9213c59e7885dcf6cfd59aabcede31ee3a5118747747387ba8eb269ee0e25b265a9bbc221a064b585ffcae30cf99f47f3db6b84d1aeec9c44692ceeacca71a272e55caf2be6a35f145ce28b5b3f9d210067fed63657d6a16d9f30aecc3cc5f3a9c87f5e30fa950b8b2f3a16e5103c90cdac1098c9774c89ac73d126a10aa36fca4943fcfcaa7de8733e414f4387818c27720107babbab2cfcb6c86ffb69dec906d96f52077a5f1973846206e50ffa7b1f6df1c5b5e05a8ce7db5e86171f8378a8e9bf06f240f0484720e268f08fb19a35b48481192d56cea354cdade638f090769410659706b660bd7d2831544d06f7ab99ba58e8301544426f345571c39366d420fde95da95210d4e1201ac76b987594253b8a026ca7983ec54ffa7fe1e28aceda66bae4c239cfebad4228802d6a445a1aae432b2c2cfc203d59ba2ac771a31c00c09e84f66b8b1ba4fef1780bbde37b9145d36b9e20c013a9ea0b1663cd9e27cf2298b0c97b024a5f456c500261e3fb73d72c5747cf0bfab168d0b86af86a554cff342966fa5246f810fd0301505b26cead8f37024e8c6fe6affabba731d4211afe60b6831f2ceeee91baf42a0a657358a98cf640f4eabdb3cdb38e37e45e6420086fc74e6a0b7c049c807f4050248f3cd11e2905119288950c2d5817c1677c8af8b015e1f351841f938bff6b7f702c8c391555b99dfacf06b19ef48c67f9d07a701f6a83ea0fcda3205235b0cfe8202f735266cfd4753bf0868b2fb77d29b640731fbe17f366b43f56cfa7cdf8c87d47dbdc9ff7154b9ab83618bfc80970b8f49a946c85688965923f7da71649d15176882ce3be480a20ed60960549c04fe806646b5eb7dfb9cf95742f7bdcf91bddaac1dcf48291fb62ecb5a15303dd6ce6c21ea72bef3f683df0df65ff43e0b5da22b0555fbb0e7093ac81df61b4d0c7de6f4f98dbdfadc2a82085e0e6dca0e3979bfb16e562d76e7616305b60d3dfdb70c14d9a1ff16a729cd7400846432279f68e97c7c7bfdd362cb901153e51dd56baa81ef7159738c19452ca6bed8546d9ab03cc12bc2ab252149eef44e3551c81be3f0a446e8a00d84d210cc31abc6b750172010c6b7af7d40ac2512c655285043a888e7115b46a0eb9c4913d75e86cfa17e617272107598da589b774036cac697192c6f9e54fe53ecd1c4ed1c3d3348ecccb0340582ece994cfa7544e5cbaa41051ee4ccb5d6d762e812b7d5cf8b7e76d6300ba211c48265521fdab33cd858dabc97167a0bdb78f26637740407398b7fcb78b9d435bcafb3b379fe3e5411036f4870
3ec672e34b8e85ea1e163f8fdeb146610cd93620ccfbfc85ae4bfdfd1ecdddc5f3c8e0fc5630ec43bfbf6708028696dfa94200db11cbdfae810e3d55ca1f855a1d9b98377b54aeaed3156391614ff1415146cae45c3f4c0e3f377f90d7e60c28c34e958c66ac091a66494a9cb1dcedfa5cb6eaa38f062664f113c3491e28606088d11e446a8cc41070298927be4c68a85cae340ee34af8e008bd1e2516534aaa3883a6f986ca290ce83bd717eaddf95888e66455619cdacc38912ffd9a44b0b072709705ff5c70ddc556f31d9e6c2b99e59bfe0272a336ec7914da06f957f27c378a6c563acf94ef5a91fa747893f2383b1ab848778abce8dc0a2b42172a7fb91d3cd8312aa98d155b5d9f13be2e5545b3d9bb3050c2447aa007575ff9aec56517c977752a13b199d82e299b253e708a21c1dd27d6659c7d1a724642b142c013122f6dde657cec6db586553b72935463668fad9ae186996ef38fdf6fde5aa8c4a7f073521f3cc2bbd9e33c4928a6a4726ce3a560659a41293701dc275c39eec03d57a359ea48ebede800e9208f4dbfb377293efad76fd92a7e6dedf8a530149879a8e50627059046c0c90e886e1790d5354e48fd8dc621596c55063784bb35c34ad25eaa8cb23a361091d4971ef7bad1cd83e23d9e40ffeb5a70bd0eeab408b4a427f6304b2ce806fdb8ed7b7e8c2102f66bda076abba771cdba1d0f5d7d456779ce8bef20929972e7417e6579a74d50245e5cbed2f04bfc519ec77c6234fb8c257515e29d1edb96ba6e551487d36a0cded60da9fa7a843f62599a1f6114afbe43b72ded08043b37e4dcef5a7802de1596ab457049a003aec15d0fa943aa0578db81492775c046217a204204ade6b158bc7c4b7bc33e771e49b9ceb385f3b6d11acd8d1c209d4b571b90174231bdbf9e4aff370b46a1e80398d19fb0399980852005063035090e408d1296d28e85dc58e90c7b68196b54faed1a22c5cc356e99822825049239b3002b946218e67841c61cb008e59c891481218da34708d17d6896fce941b7b3084a481c226827fdfe19e4c7455a6a2cedf205affaa821833b63855eb5964b074d4b475196abf9c592d54bd98cbff0d925fb36333b5e7d4fc87ba654cee389c47ac9cdf1673898e71ec8b23520386b0b0f6f9b807204d85d772383ab5d24526d6c57c87c5e2d566b58fc8f8058b38064153ab6c3a50f5f06ea4764ab9d606f6377e7a2c7412efecb71a4cb634cce3a63bf5359e8814ed77ddc6b4a40fc204d9af66876d06ecd2e40f9366c1185eaf88108b40b6f0f57f5ae175f5d46beb5036134f474cafbe8f4ef2260df5ba2da17bacd1acbbcd64ddc03746ba3e928cd0afcfa2939a8d6f11924528922d9f10ef8a312f69d8dabd8cc20de9b7fe58aaea75af2de
6dc862940564c61c19754187341caa90ec381760e1c4810d8e771335ea5cb1a5c5e8e558af3069660d79e1ad66e30ac224cfab91a1bad9a4e9ea244b7aa1544ab11d44a25787bf9532f5c319e6e34f803929fa09cac811af9740548b28ed6dc4cdfac9616072f317cb0dbe1118488ab770d8576f090e58f5dfb6288f42afdfda46ed59277f7e06823e5ea03e2d414a79f454d86db3e322e1350ea893dec7099b1ecf696a4a3fb89d359fe29ab620529dc6a7dc5d632b696de9f333f506f84bca9fd7f66c936398123c12073a51e335ef3a0bf6b80055a551bd036177b7838aa07680e7f92556853232ef151caad7d7b09b9c7dc04925a835e1a4683cac902eeea199831cc929753e8c3f00114ed86bfa35e9ee4371be2bf58d159b93fbb24933a7815bb02712340282141d259a2deb9a1788ddfe75b85568bebf1452dbc3ee892c8d824cf01005c143a6cea5ccdd9a8c70e9e1b8e832bde53a109fd498af345eebeea6e5334e304cb584370ae3417a4a465ef262bbd64c2e5f6696a59991b4e9bb50a7381be46d0adc35e67276e1bcf3d34b781f788bdc0e3f6f5f21c246246eeeb28ffe8e57dd2fd09088bb2771e9c39b56726b036ee0f8fad7fa03ab203db69d4cc332b60d74e967f245f20b3d53774b795ce491603ef7af90021219ab199480c9edcafce4ff364a1e88f97350299bca866791bf4a61fa014d7d2dee73f62d8a4dd890abc2b6e781f946316a65aaa83dd774b942ff8badc448d7cfa5e9bbcd6d30af77496d69fb684e6c5a06e8e90815da8943277b2b9c05d112e3b70393cb8e0980edbfe6bd772feadb2ad553ac867b595959b3f078d0e89183854b41b4770db8a79649c5e4340bb205fb32822d4115434459be3101fb6ca248d6254149a414e482b6f602931c9e3a8ad09bd5db8def3c766799423d12e9d3af353b1861f9dccc899694c256e7eb2403edcd4c3b1dbac2a5677b44ce17dcd2995d8223608a7b6ea41bade82b2b195675775f2610ebce08eb81bc4ea253467a9f76e2701555266147b79f031ac9418b8b60433299bb850f4e463b446136b33fb58fc684de519142b477e32d11c3a41041edba421c1620f43a6813020d1d33cda7df23d2a8c24b7bd7772149b8bcb3fb28e53c0195696a648c0a7272ccdc2c78b3b0bd52b61dace9f79755df413efcfbb1ff1d5491b5010dd2dc6241c7c9b541ad5cb251b0e8e0c432de389aed2db2d339a84afc67594e460dda83c019fee3e1eb85591d0f036a09addb34b852a7bf79c21e5592fe0cdf1c2420e73293a2832b2ac6e734834cdfaada0265071f96aee6b10662747f79a4f96f51c8a6049afcc43da5b766dea3c42378dd4994817ad9ba561fbea39dc16f8f2766bc8f4532ceef2fb54e879c403e6083b757a6c605
5a22c035cdb49ecba97c448263dcc33f509c5c0cbc2e65430b8f32779ba737bd292d77fb5797989090b7e126f67c321f23d671d5b80911e32ca2cdb23bc79596c9b33e5c2941eeb7b7931687f94a159b72a93142774147ddbc71f6ae38ff359bacfebd6aa33676c46703790243089ffc240a7649d4e36216948e3e9ddbb71541b022b640f90253064f0f27276cb93193b54466fe1df047752677dc29364f4bd97793a6735752f97750206c10f60316b3f8d9b9466412e997247ce9d9bc0e8754ef754110736052404af720c7f1fdd7017150160b35b6a2640b906d58475de9060c4b7bc314722e967369ac9a34cda947e542d7921a81a8d5083934e92af91841256e9b82930ca87ee8640c7ae66c5a3c3fdab86dcda30e079976d22a3cf56f9c156559828b8f7334bab35b654f018f331dca72dbbde50541b951d543284d49d3b96f1f2da217632c5e3660283d27904a56e4c83b4cac7675134b8745cdefa91c16905e16d958b0dcb29b5d52020ed7de8cab9dd8ddbe2cc6c44ed652193041e66b8ad5b86c6de0b905daaf17ff243ef2b21991da5b8a18c47f5835c405b13017d9a23fbf126c53bb2777a6d6681715da504698dbdb9d35500a9a11a4d1a41e1890c5aa928f33268c21d8f46a51abdc740aa3923f6b6e1b689af06a5cd1b983ab6111d9f89197e96accd56b7923fdcabc93f044a05902afbe293e4e17bceea5769574fc49020f2ada228b29f86e86439101eec7d551a882c8260d2798a5464341fc08b03f2a1e8d1420b00aeb0629742061b9f53ca9aafe67f06fd0e596b2f47f6197de46646ab70adbd7b200b375934a19280eb3be89c573b2b44ec0c4239f54bb158f23c06b65dd3c5c5d16754e0bc66e647409c38bad393e301cb6c511d3d4194c815b6ee562e0b3e5b1eb54782aa91a9b5957e61c9bbf37f51605e858549b4f1858455860c5ebf685d8f1c864f881e4b7e4b73cf866beeff7232c6d5c08c9b89ec81a9b0337545f8115c4fe44dd6f6147e6cb7788f7e3c1feba373da09f42d731522c32785653d33ff195bfd89f0c06892919b3a90b2ba324d0cadae87dc7d2f795e9a33abd500cdb3ed36975add5b149376a0888e6995023bb5fa7fcbfe83556309b452a436aee6e7d7da5242f03c43630c3bff362adec8ec4974e330567e81311d2732d20ec8e01c497b5fc6ac43f50208e9e11094e2b44a07dbb975e43f0eb0dcf8e9ae67391d3770db07f06166c585915d12f618a50f2c952ca38edf636caa511bdc0e70cf567fd7ab6d1280e5650f6dd4fb2e9951a012d5e2fa22a90e7e0d29637d00a0226cb13c1a90c6a7989063d314c91a4da4c65ac011231320ad2fc335fe7b728c139e58682a8aa4d296c0257234e89a8e6890ef8d982c244fb8f7b391167fa3bd5
2cd13704de96aab9589f7ce03532fa1b23618fd29d5d96a7ec7c43f2db64562ec1213e364f50a021c954c24f64b84332b38aff3d5bf23a2be97d23f7740a072d0e248993143a9d28afb08a577eabced00f638d953ddecd6bc8c54deae66d5d9d61f3ba20c15961270ac5e331437306200e89a2c1ba8b4748280d8092488258e7a006d2e5c47a83e02b71655ad8dc8ce9d7ae0302ca9ce3ad281fc83f71efb0f38afc85f9a02c024ea521b09aa7cb9fa6ec18b352d069adb122513b6a5c976004fa08ac546aaacad730c128bb2233b504640eb977697238304a8867e07ad5452827ce5c57f688d69f8b637231bbba36b435b4c2eb54122e20a256b316fe0b9f09ba49c6e890f1c125b3b0a54055fadf566b4116bda77d1c1f54f6179470dbc0f18a750471e97b4df70e1f7c8d65ddc7724ecde9cd8d540ecec35ea698f3234ae9cf110fbbcd70ae1e8c085abea9b8eecc97783ea2e645e696d4d62c47a714fea6c527ce28b679ab060070899611feea7a2dbea36a5e5cb0db1cc0d1954377b0b1a80063c510ef3435f8ab3c0611712212a0d3dc09efef11a5c9b2b6427913b993a6429e5dfa64530c7a064292300ac582663714d24541cbafa772c2ecfd224650ec4149d7f095af599fb9abe4ed606a3c9a01fd6fdfb8a39152dad05d896a3aa98b9b6be9484de85057d8f0a9fd38382f0a1b18ab6fe8e5feb15248b2852d4bcc5381d228f3fb833720ba581518d3ed51b7a7cbe1f1c883de8c0deaa92f34c54c14dccb5a333f84fb444fd93d9219be03045ea966a61a6fa74690c9dedfeea1ae26cc89c1e2352f9af665672f5c7ed01f12b6ae5a71563b2416fc1498d604aa8bfbefd0210eb2fa619d365dc507c15ff8a80e63e106902633f6625cb7f3a16aae1f77c8aa40d1dbe2f239219e19b2e238754b267c5c32b410592fd969ec8638dcc6c878f681999709a177ebfc6380a1edb6d88b18a8aad25cf4c1afca087d2c7297af4135394ebefb8d48785f045e894039845f0403969c4de08195ba4a280e664caa381bd709d7c9eec0f1bfe41f1569f31c57f98e3a51a1edc09c08334f181dfb582cb2c3abbf497508112620ec29490991316dce02458f3721ae0d09ac121586d5f3a83432bfd77c5487cc4cd6879f7de8737f10ca55ad7660dc66cb1060500cf5e9c0af985918520d774c51dbf55b310928707cde6b80ed4c2d5280ce9427bc3ce0778bf9286567bc07b784fe362188192180b0c726f73981741cae69be102d6fa51ded9d1d07d8f1c6ccf34e1e8041d3285d7a2e1ffab2fd909b96cad746c8c9583cbd611f00e4ed72c2d9b11db8c8f8a185b507299d124f35e17047e759729e284ca3f5c0706c7a4aea95cb944c0279106b33fb36c559a9cebb9011ec6532821684fcca72e5
9f709d986f69a8bce0c1567e8a2c3c239bd6f4688babbb14301ba31653cc4665540195d04a7b97adb80a8301807a2f71f3b296586ed84259dbecb51543e2d47814a6163f25c4250694560c0f6bd13a0d14137949bd3942aabee89af46cd1b044fae3eb930a1bfce61e7c0612211d38ec068089a90c4e8409c4a0ec5aad74c259357d9f6da5777d6d210cdf9fb64ab98f6efaeace77f23a73a5c5a333680c69e87460f55b1bec27fbc0173c3a491c1b82fa62a90399d7154a4540e96a7054dc34e6413ceb9e294344f2acefc9107306f49da2204f3ea2d3a38f8e9ff0a88eb2807a00c2c0b9e01cd1a95bcc825572be38f33c77fc2e9b82af06e15c3b5fbc0c4c343e50dafd03180f1975357ed2d1eedf46a947a2a4209051ed388a54477abcb3a4a36808e4e3aa725822689f861e2deba71abdcabc30af4fe2e644b11f8eb3dba80cf7b3fbf9eb7f5d378c42397cde37b33f4029548b6dbe0e9c0c87dc3bf8d516fd20b510be907bd560ba1e60f4e36a8ac3d5b34218ceac208dd732706030a712d38de4857358e94ea547688de11de94de4247fc9e50018e1d93f53e29b360673089e92c250453f0ff13e6e117ce94250076ca5e4efcb5c9f30a6fc123f470ece1f640ba2bd4cebdc6856e6e9d0a2a4d51c48f12fafef263ea83dd64fd575692ce6caf6536120393b349436cf782839950db0366b2f935f509e45ced95961f6537c461daed9f8c1d1ccfca06965ce7b578a8fca69166d700e7c5b8646b9f6cd92d88856ecb9bc70559e86dce5f8d1db1707d3cfcbff2bb7b49b189aea939f994061ae816d5016989eb581451798fb22dce881832df99e472ed1b22339b5c4d3a2f7e904702d03ac3685ab7a8d354610e846c07074af671040f9aa72f533f064a27118cb01a46817dc3997988c4a18ae0e0ead047ad1e32825f0e6393951bab272e41d94bbc47f4657b8fb65c1bed5c6cf73cbf3479c0bfcc5196af8bc3b99fb18dfdde33f2c438cf00c29e0a3481010f7348aee0894d1cd0223956ad0ad94fc70469af646c66ad124f1f7f8f98e7d49fb16df47c538e2c08e1264f4e404edddfa5cbdd5efb24b8f39164bdee556930951e1ae491608eeec5dd8a644cab8f60cb51a3ba144d1189b2a4778da5a7890888ff7d67f41195feb8ac09071403687776963000a253b3d29adfc97ab7ba383fff162b0e612b4e2c8f29e9249b7da9aa24c45a051ad1a860a7b440a0e32e579920d68c4220f7415caf8171043b87390baf3faba39fdd606567b6be277d6d2225ead573ac7b662d8f8350d2accd5efaedf0a3892379e9c33752a908c4e8353c83350c3e9901fe74bfa688d776ff6022a5212451fc40883182f03a97ffc776fff4cd9255837c86e90fbf58070b77c5ef94c8c7be4d610722aac
a275ee4b9d099f2516706dd3d2e4dd1b790c35a703909fabb2c0d4e405bfee8d795e0a4cc7e64f5adc7a63d5164b195dd2eb96f64dc501916dfb83db3c7cc6583e4ba71e1c042a9fd7e749279c282ed1667f93de0921052cbb7f540f0b3cebba625084b8fafb1cbe43a377efa6e9d423dfe1ec135b5989ce33b5a35454a337e3ab1a2894f531ffd41b758fa33ed4d81da600084666b7d308928d39d57a4fc5f19959928e2b03473eca7f85e8ac4813ae0084067d9777a015715f5bd52a5e6bdeb47d89c194a01037ecbfd749c81dc6f3882b2b146770fc2c6f4696bd3367de7a344d6b2cff7087159176bf8daf3f19b39304b7e1158eb406b05bc032a3102ca08ef31b0c76c7537b60e8617ecfe1cca51127c3000d092bb871f520f027cf9e3ecc7c309bec05243c83566866b8463eaba69ee315c97011a1faad7fe85634d8a02e98c60148e0290cbc5f129bf4a4fb125ee7f73c0a1543d799cf9b78f507d0a2d243b40c574dad05cbb562a5265c7e9b222c49a66619b736447f0804bacdd141456051e51d00752c417a1e43e963b81632b0d2f55d63814f441cb13f7b4de940618773c8e8793a46ad675042bf70a1a855826f35c3ca70b87f294b21b0f7f8fe518ff659654cd88a644ba0122bb7310da8292d7e248a5b6333f2bf518e185230d8c0eb552c0863c912cecd8c0b09e5dd12755956d655dca984ff8f58f27a09abfe2dbd04228149e4bb651dcb3e5c20995177c4e041db0e5987f05cf5373241f8589357630789f4beecf54990c575ee19e44d102cbe196e9b216598534bcbba914e5b9a9db58746d31183641842e627e632ae64b68ea00bf1028b28c72692b0080e7bfdf75ebe1ad250d982afb13e0eff041ae6dea39bd81c73dff596ae04e29ab13e510524e722e32a372b94b160c62ec1874dbc6f6b78dc6060c3ee2f621c8bbfedea0307578eb17529944596d52dc9cf3ce36a23489f1f6ac8e9f875640c87277067d226467554a246446ecc26839c062f167504a18f84bc277aa36c614456cd8e0f3ae3950793b9d0338b756b275b999be099882319d49996d670bccab251c316d07a258291731841c8d4934f5e24f518e625872d38a50f60de3cf9208164a18666c6446c01e07109a3b0c069a260c15fe25c92ac258f35cce04beadd571084fe6cd7b69ff6e3060a69543e3c3487b3624989bdcfafd05946e9358c2e1ed74ecee9559bdc96999ef308348d3d30b3b4bed56618d2ab85265575fbf57cf04e9727ebd4047b619e820f7392f794cbae73820f1a4dddb39beef22cfaa43e0bea8b69edeacac9f1ec94963f97e847037729dc41087632b491a497abf2cce87d3c8bfc1e1839ed3ca39aa07c38bdd0a716082ed14b4215107785393c572f2681b960944a5bd236976d
d64033ef413be6096e44535e5f80f82388353d12094201cafafa91b5376c85949a3887b43b3230f11019f070f5ac8721f5ee403a431d2335d61303392077d836519df7e9ef24f934e1cb280f3317024d29086fe92be30717e9efba8d6eb07c41fd3a6dea96187de1d2ffdb63b04d48f67772b11a6cb34481ef41f990599e1e86752375526784a3c1f4d97b331c1fe66e325978ac95fcadee4000884a771d48dc3ce951a2c9e2f40eee6419948a0941d2743a3c5fb951c0a6770d012693dda2aa872fb47f22567cae362b4828dba8a8347fdcaf136574553e7c3b6794d13d19d2244139fb9732c6ddced4d823e4cf055d3d8c1f73915e837a6e0149255ca89dfbc9ba0e2bd03714a494b0877276524de61b26bac8cd51f55880bcce5289638aa7ec1bf01ff981c5a02bf6268aae8de093187c876595bdddbe1d85efae6bf8f0d6bdb8f1ca6a04489516d637c497ff04eb10b98231b11ae7b841709efb0c84fcfb9b45b50f9090bf4330d72adac441dcbeba3a05922cfa9661cda0c0213044a8f76f71e401800d733fc496316faa2ed4300e38e5d06aba03c22a60e319e59433fc794cf41555549896b7375cfed93906240286b97a608db0049c46d159d1af37c8bcde7f21d3595ad168f9e0e1a4808d3594d66b7abc82f1a6b26a1020d41c3c514e0f74c90e6e8f90923f97b1d1c68e2d5014b4c39e1f141481afe00ef0d661d9e7b7b406a05b27cf1c199b21cc989ecc0f11283d59dbefa13b3b3e42f316cb65b9fe685b3990a502e998c208bcda8bb6e2b005bb3f41b0fdc981949b0333280dd8154f4a58055e287ce4692ce17f75fe4e4363a687768368ff79d60856a1426546d91d501cb68528ad4bdba7c444cdd66926da18092a8eb2ab989029a03e8294bd288b07716737a010385f3788777be8fcc15a8a003f27eb855f7807b78948ee62546c22e2a9d9e986e011e942969c9561c6d3bed28b5f1ea2ed708af3a4b9d8442527d683f2af2ff0f19f5cdddd23702dac08cd3604ce8850c24134eaa883cb39a9d6f3eb2e0975201c806f7c8b237abad30f6261704960b0c859ad26d696e5ea9ec129890ae7b90110902c4ed91aa29563ee9b07e73534f2a48e1a48881cef1bf1a597006b778cb1861f364749880806355d95898cf1a622e5d323452b576741db3cd4ea3c3502fab211755a686cac6ed4479c67636b331c81e84072f6c510e9f42cd50bdf80c1bcb5fbe3a7bb1715afbf89443c08bbb2680588ae67a442e16d4677e345b0509a61d30e5f93414f4b535ecf4797f4ac1b9f2c66d13963eb36bd789c6fc90e79c9021e057df0b6a28c2febef82ca6247a6fac2ef981f2cb13e4ebd5273ff0155a173c85b7525a0401d00a5294f11e295b9d9230c20aee4cc8e17135e5a6484eb43
a9a2dff0e9a776ca3bbc0116b24a1230e20a21ad442c2c0e9cf23227d9c74a87485e223d01bcebb5584eccd173f572f00c03187bf0b3a6f01da929040165ddf2da37cc04baab1ab65dea89e4ec45d18c78f6feecb33be606e3c724d914ebb81d9b2e4b21657a0c2d80e0ce3d66f9b5e2a090804a17c487107aac548c011379e2c14fa63c08dc42f95d673da6fbb601f56764bc54741ae4c99a753df9e472768ddac6e05de8de89a6450eacfd96b1f86658e733f5f9c3fb0a48babc6a474b17d518101cacc59e42dd0d22eb29fb9c9f281861dc9e38d2162b2ddad5af5a4b0882f7fb78bc431ad08eece35fa3d6fdb82b01630edd437613fd7200ffd262fa3c30daa5d9eb6746d421cf00b4633a3d869f33794a0144f0dd84a1dff5c0622ca345b0a2938040e70be33ed44c4f43c4addbe087942343fb326450946880b253a96e9691cc2dc6f1d67164887f36be09464cf65b571c123880b400fb6992b533e1f54f939da2f00d4ef68da75ad8f13d4b132e72ecf97752d3a6a46dfe7a1a17e533c0554fbd4296fed20664789062779a328ac65173ec7ded581d4710fda8d2aaf6e8a4a3f302722859caeeccf30f93218352641c32ba67dffd90838e36a735fe319b9b3237a9d3a1aaa332131db6ebc98e051d02d9593e1fd831a457465f2f01c7ff18f0f4819a6527c270fb83a6bcb0478e1bf2287dc1c43c61cf8bc241afd41841373e564d81f1c4ba420a6e71b068bfa6c501ed4d9e9d2be32e6821248821afb7a41a9f9152254ecb53c15cb2b453db2624ea39c534318f8d82227046c4c221d8c3c72ef47928ac3574f1df75d8b9e706df6d71208cb1f1e2b02daf03b5c40d0bcecaedd01961f1c529c76e87eda4361f14790ab7394804094ffe72a12f43514761fb8626b6efa2038d208beae9b3741e2f7f89199ce19521beaf5dfab65cd1a996d9235cb0c2d1a317270c3066a5a7e10c5c07a8517de954dad4dd801eda0c56201bdb05d5dd2ab9d5c714dd2c681ad80cde51ce503056defda8916a91b6223312fce40a2efdfa673af7153d26dd00d0f8f9548d864718e37ae77792f0b2f1ebd15536e2eab78611f5731947debe7f2865bbc12ed2df4c6b979a6eec953cafded230dcee32757228e93b7464ff4d70ce26dbe9f36f063584a5bfe611eac3aada03446a496d66353b62da3732b94e999699e123f04cdc52e7435e27f531422ecac8f975e050e04e402a8b044d5c138d43bdbc529c2bd33cc5d289dec0267a15834e25f8186fff7ff7b28b666d89a9c147e91ff11954bd7ad58dd0a9b64d8d62cef3b1338c793fefef81864607cefbf42345e1929170aecf64d76429d8b9c5d46d4f810b44177c9b49ce51084118516150c6eee2313f668ddaa7bc95ce7427cb7876282ad0e5dd8c
7ea32272866dace65fbfd784964de578b9ad3a4cfad36202f0c4a9e21a20817764b4f8d51716c1a53da492be3eeec89308c0f282c9dda08d451b0bec08925414d7de85baf1a25b2df1e064841b8e3b1a54bbbbd28ac2a472ca53b3a562de916208b2e5fd4e266b4e5c871303538ded890008d06eadbb72eee48acd9202c8d842c6b8058f2869182b352f1eda0744a70a84206643f81f3a3ff88cd8fa20c673f2d556cb5bc4c2f8b73cab62f5e95b1c86115eafb0798ec26b69b9143556c28d601d3df060a7f66d7a04f86d643bcb5b2203461b45468c410d0d8ac00a4dfb7f152fe19b7249e96f7e95f26b3171de6dcbd60f948ecb02940eaf5ef7b0f174a4c9c6369aa5acddbdcfcfc7d10dbbd2bb2a37ec89f0a3678f0e13afbba2e93d6ca70262c61be1984cddba702a9b03249ba78f10e42238147473d46c4d83f6267e3b9465feb61709cceafd55b2e40d0f7f6dec8872bc3ce5d76bd95c74bca04acd83553fd8ef3de43c7ab75c563b92b4d1d4574ff1bfc30ca7cff34990827a415544dc3aee8dd4fa829fdc73a90e417be10d0f3b4940dfeaa1b63e342124a549632774e1f8b4ea8551ca0c5db23af8f9d4904b1c856fb1af5b1765e6e10c50f501cec51251ba43065460c4c28c62c6e78c608020d05908569069e923f62f4df9968c5e8cc85ddd1a24b3dfa5bcfda1c63f5c6a5586bcb442edb8477aed80beb98f41199bbbc651faf831da597952b2dcf9827fdcfd7ab73501c12d48290cfa42d54a9feeec882aac8be93cefb68b7df376f08ca181245c161f80f20aa18ed333545a7112d80bd13f7288117ba293e2dbdb89af2a360e95bf9104cf752a5ac0332af003fe04c415958454fd420ae8af4a590a891295c916a1b5ca1575fc907f8ab95b1f3475dbc483f4c4633817c248f7faf20f3e0179497d771d10f982113a7384c0da912e5208c14b3b1b747a43db2bc08f902eb9f0e8ac70ff416041e8f34b463ba0d63ae6e80d7835f3befc3e70f9df6c5384c82313ea953d482c3dc26e75f8e73f6b993b44f20b829b8a60cba964175088d874e762dee989a97e071d36bd3fe1054644594db29ad2dad2daf6cc9e98ce72f6b777246fbff8423431322d63dcc6746b32230ddab0e24699c3e729e81f630689f5683f987144eeba2c4767b615dd1027faa69ec220499be0218e66f0c7b9c97fc10716c60c47ddf165d4dee1157e93dd49568d1f930b9a86d02f7b30ef4fc98ae9fdcf2b0e38a40b6f10741c5ec1303c6d324e178b4d502328f6b4702413f29466fc7774d3a64a0770b39caec7e594e37a6a10f925e3a000c50da50d4fd56b24327b2ad8d9ba29aeb4cc5ada1cac2850c0cfbae38774dc3ba2e27f2ed71abc582da19b0bc57a6281241a548895f81ed1180a2b24499a
02b67bea809269ac68b0830db4fb8be4bfcc87e278142f189c71069a48e3924e8ab44e32d2f8eef816b59591175c426a1ef5c7959bc219f895a80640512324fb81b8732cdcdde03ff0a6af260ea81f5379f7badac712b9822bd66f749473fd7b863f3cf1f04d8bb9607e471c0751eac01fc8c93bfeb3325c38b5dd370977fa1633fcc790722693b779ca576136e7423e4e5979bfd8e14905f9b5357af7693cc4d8c5a84bc0649ee9241f3814513b77f950bd3728d2377e543349a1a90d78e282350b331bf9de827780a6d63674993168a8d8a1fa759cda1e6c0afe0be05e14cda543f5256fae8c1dbedfddd1e5ad8662a35f9baaf8858965084f64d6fde2d48008c0d949c02a1e01bfac4ef2d96d83e45c4e72dc00ee043db90d055f918a1c0dae749391fd432e898f19629229829e56380ccdc208b49bcdb6606383ba481bc4be2187af221d0f8579148dd66dd68041bf92e3ff078df9c5df6164129ac31e21238169b163410e2a98163a1523cf3cc1cecfbd5ca7684ab88025e11abc05fe8c52ff68441efff3714e54d9e0417a1425c5f4f1d0e0dfa1868863ca918fec1f5a8e0b21f0ae749fb9295f85c25a4e289d8c0ac32d291f36e0e40ac6d92a227b2b70b8435fecb064531b84263e9dcbf45cdef9cb147f8a87608772e51b4ba2d86978e4102291c858de0eb0a51068d2dbf2ec3298bb593906f83b36484a9ef0019b156abc13a7cf84f7b36199315d609bbca4820beacbf30395b560809ea6acb24e546da890080cee950df19042d6f58acf0d3d0bfb51f02a8e54f76205ff34e670aa75199fc8b4e9765864d8f5601b51f3dc06c9734b322ac4bb5fc4f7059f3ed147169201209b4ea02cf358b5b0ddd6c9890096325e581a5db36dc3225a00ca4be484d58efbb25e3fe537e96e916f0cfca18b8b48bb3f6d9e9243bde52698027d1ae1c050722cca64043a77b68a9b0221b1e02a1eae592186e858d433861679045e6db6f68d3c3dac81a0eb4cf1033329bfb294e4d0d823857c6bc88770f11fb0d1004a410fcd26a5411acb8d85a0c3c188d3ed77c13f7c65c0afb93429da02d6b082c766a46165b91b8a44573e68cd4617bbe673528cd09399890af88c50ff54bfebdcfeda611a7cc5f48f0a67a66ffa9e812e8d9c110309ee9c8a11c9694317b00abb81279ec2f5dd5f52c0f17ea50b92253da2d62318daac6fce4f183302fd9241c03c4cf599d8689bc6afa7257b6e445ed97879daf3ea4fc54cbe441b39838a4449747d1f6c1b5263aa19e4288007888b795549a9f8f68def34ec583a3f88e1c0b9a9b5204a6caab1af4e0207024276fbc31a015e9c86d723dd032d5a57f4064823ab5afedc2e461fb8bbd78402ad3b9d9cbc01afb755b63aa9b0685fc540a24d669d3279fed6
6609863606aa934fbf0ff767a5586dcbf9e5c0f947ca68d524e36989ed2fd504ad21a3164b7a61bb64a2e761cd81d3524fa7c41915d1620cfa5f9c583be3e1ac14c0c82635c755b954e9ab017a25d35d275b8848051fa154620b5e9db540a9baf6f034b0eed0d88a423a41da50d769467bff952541dcddf5d8fa8b4b4f97fa4634fa1118712eae5c2fcbd9d6762ad60f909f087e4ecf7a97a1d2e885e720ca8bbecb04500277ce1ae37882d034de86b3e6870377889b4a82e83b29b05438b6b2a3811403da177744347832ed885bdb25cbe37ee9d092a2fd0df58e064fb42969ff20c39e915fb4af981a28d032350d3c23f8259a5d7bcdb7544f7d6a815bb0f822e720cf6087d0dc5627b4d2f01d074ef50d6fb5f17794bfd07c446bfa367376aa44b801eb7e30bc5107fccc546c16c52b06e76c4830ab2f8daba95bb0f3a6a5991c267608b0e37a66099e2dc6f24f75b9fb4b3edb65d3a4d9850d487323da40e7cf87da2a7580f0b7da764eb92b9a3053e7e55f0d6f45ceb0333e8050ea6f8b7954bc22075fd3b619a6b09594a0be2aa94da2fadc9cf8e86bd4845a1ae32e571d589e4f59b3edcbcb106dadd32c21c8c476261b7396c924317ed821f4b414d83777a4a5d0db38fdafe99febdf68376631289ba334bf7da9d19ad3999e41348da8bc95b294beaef0e933914029e5e7b8f1ec3086f0dd1f8303f8f6620bbd6c5bae29356e61a1b2b8c8f368547373f4205c3c989baa78e1191da15c4e48c5e98b0d94164613c248516aad49b172d6d0e7afe12d81358cfc0359719e12fb76f36c9629419071511329e0fb9fe544cbec302eb6b7f73fa226f7fd7c662cd248e8cc46a3bd640b9c9677a77947f545cc44a054192a1f7641729f9eed9e9188abe61fabc1e8aa984dfd779d6b0b93f78a089d8187634315aaffabb80e19a1cb8cd15d160937ec91c1097ebf119a296b07485eaf9885760f5deb6a389f534b2679322156b7444a4e8bb34f4ec203156495336766009bec24c2b26acae5894f30025b7a9d28d344f23e274e36071e125121471b585dd1bf65f6b19acec147a948be300cc42b07460aa6bb1a29233fd085fdfe7e15a442c1e6f1af8c9739c54709b70683649e7363fd6bd9d00bbe7846c948ea9eb072eee4fac07a2efee6d7392062ed28643e6c56ee1d021277f08921fe59ee61e07ccf28e5d87a7a6aa514a69196669e600fddc0d51f6da190ecb56a2387fc21b7cfddaa01d4f4f9b0cbb3bcba19fc57439af6aa4bfe9cd2da0fe1d6287f7558ba05adfcb2896e877daaa1635b56cff8e3071da2ce3adf307226a7e2c88f33cf485a9eecbf27b4e986ddfa67418534a8f116a93682f414dff1c458c4f1ba3f342a9228549bfd603078d07061da145dde4ee6021d76bd6df9
43e5c8ac0a761803387b46a5c166277d4e5a67698608a76045bf4cb81c821f9133a511efc687362d32435f0f2ea6e5cfb24dd2aa201439c7c5cf30782e1b5e16260679ef77bd7abd4b78f153ec69d2e054c520c5d5031809c781e1799705561ace4679dcf33875c334f8a62a9ea0a379c83dbc0dd8359174403e241f19d70abf4b7381810f63462d779543b8f37cb344bd28906139395e8d568fc3cc2a12c77caca902c0aa0a07fd001042182f0332b3812d65ceaeddcbe3f41cade320cc6f15fc245895c5c4f820372155a785f488e54f8d0453d1094448d61540bb8c629a5295156095b036b61c9d6f631fb65293ec16e46d69eec2b025b82b4daecaaef5999df5c99849f07537e31d2bf1d4b70dda3b7cd651b04a0c334e3f05b02dbf6a6868f26d6e9ce254c583653932a3979e58c023582e976cd8185e491d10a2a57d6504204ea005dd8599f3427fde2b688c9d06f6de7e9425b8976b21e9de305f600807b0c1b09148277741a9ab9b96618d03a1d457af7dd7bcfb1afbe8f0b4716a660c8e3b8b33332ddf043857b594fa841dcecd3ebb3bbc9ca310c541ed583bb94ee37875d405b12bd0453546cf0601569238791ec898521e3ef8de5b4156aef5502fddfba88b723561b5b791e024ce65dd3a888b6abaf6550e579a73a5f88537d6966ad4232f9606412c863239ca8f0a070df60b32739dde141fe1f14ac9fd402e0d1702fb1796712c54803fa4f979b12afd0b63bd8bef38813c0c651e0503f058d2a8120249337ba7b09e89e736d3b2ef11b87c117def03a1ce67709a756426e5afe91fec17929e4e8905a058a6131ee3ae14e58f63a81d6f18d54cf9aaf0028eda6c065ebbfe0eeea5b2b8d88da14046847a58c9f4825f82c9af4893184726824c867e0aba615b6344529319e11c3396a42d97705dd31555af7e05b40637c033025ae1d82230f3edda382c482c9adb4a91e42d7196dd463b48fbeecc895a6fe6429bd12002716aa10424645adebe512c6314cecef677cfe6c62d8c2d106d7ed5ce92dcd55e80a3c766f32d1b9b288a63cfdf868cb632914a464646252839312772694e9220bcd748f033830f82a25bc2f3b0e9d86fd78bf37b7beab07f55d0b425b0a9b9842b08de52828a3cbfca156f44c14e9fb6a45be40145b1d76c48ec1bef40d022d7875e76ea550376b8ab110f1d4c460a005ab40b49484cbc985aeb9e6418bbb67bf4b4c39e882ba91084de9662c2ded559e4ecd5126bacd38afe57c017f70ae984432ad57481ef9940e79950051c9bcf9691703fb67a984538563a820783eb0b51a95ebedca9893132a8268935ace2cc95380f02b8c155874007c2182b4a48b18cd622b998bb371e181f37d16b4ee5437ecd419c865900b549a14b9b3ae03399b324458d
dd96a1e9d163c8532e19f64e778ac71d8c8ac6072396c5a5d9337704fd15ffb8407cb22b914033fe85382d04f9899b2d13091e03cc4cab0bf1e84f6767c0e29ecaea8f5e9d0cf07be7fc715eb8b1e44754edaa0ccc51c5cebbd29d1349658869b160eaa225f56af4f30f0e\"" + diff --git a/primitive-types/impls/serde/src/lib.rs b/primitive-types/impls/serde/src/lib.rs index b03a87bde..22a17114c 100644 --- a/primitive-types/impls/serde/src/lib.rs +++ b/primitive-types/impls/serde/src/lib.rs @@ -9,7 +9,7 @@ //! Serde serialization support for uint and fixed hash. #[doc(hidden)] -pub extern crate serde; +pub use serde; #[doc(hidden)] pub mod serialize; diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 95b626471..cc8da52bb 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -9,7 +9,7 @@ use std::fmt; use serde::{de, Serializer, Deserializer}; -static CHARS: &'static[u8] = b"0123456789abcdef"; +static CHARS: &[u8] = b"0123456789abcdef"; fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str { assert!(v.len() > 1 + bytes.len() * 2); From b479b82f1d1759d144e5c7ceae0ca7429f59b032 Mon Sep 17 00:00:00 2001 From: Anton Gavrilov Date: Wed, 23 Oct 2019 09:37:13 +0200 Subject: [PATCH 020/359] Use published version for secp crate (#246) * Switch to published version for secp256k1 * Switch to rand 0.7.2 * Use 0.7.0 version --- parity-crypto/Cargo.toml | 6 +++--- parity-crypto/src/publickey/error.rs | 2 +- parity-crypto/src/publickey/keypair_generator.rs | 3 +-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 9b4ecc7e1..a4f61c5fe 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -15,7 +15,7 @@ required-features = ["publickey"] [dependencies] tiny-keccak = "1.4" -eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1", rev = "a96ad75", optional = true } +parity-secp256k1 = { version = 
"0.7.0", optional = true } ethereum-types = { version = "0.8.0", optional = true } lazy_static = { version = "1.0", optional = true } scrypt = { version = "0.2", default-features = false } @@ -27,7 +27,7 @@ aes = "0.3.2" aes-ctr = "0.3.0" block-modes = "0.3.3" pbkdf2 = "0.3.0" -rand = "0.6" +rand = "0.7.2" rustc-hex = "2.0" subtle = "2.1" zeroize = "0.9.1" @@ -40,4 +40,4 @@ hex-literal = "0.2" default = [] # public key crypto utils # moved from ethkey module in parity ethereum repository -publickey = ["eth-secp256k1", "lazy_static", "ethereum-types"] \ No newline at end of file +publickey = ["parity-secp256k1", "lazy_static", "ethereum-types"] \ No newline at end of file diff --git a/parity-crypto/src/publickey/error.rs b/parity-crypto/src/publickey/error.rs index 2cc66f733..b8a568da7 100644 --- a/parity-crypto/src/publickey/error.rs +++ b/parity-crypto/src/publickey/error.rs @@ -43,7 +43,7 @@ pub enum Error { } impl StdError for Error { - fn source(&self) -> Option<&(StdError + 'static)> { + fn source(&self) -> Option<&(dyn StdError + 'static)> { match self { Error::Secp(secp_err) => Some(secp_err), Error::Io(err) => Some(err), diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs index d8d6bc77e..e816bc6df 100644 --- a/parity-crypto/src/publickey/keypair_generator.rs +++ b/parity-crypto/src/publickey/keypair_generator.rs @@ -27,8 +27,7 @@ impl Generator for Random { type Error = std::io::Error; fn generate(&mut self) -> Result { - let mut rng = OsRng::new()?; - match rng.generate() { + match OsRng.generate() { Ok(pair) => Ok(pair), Err(void) => match void {}, // LLVM unreachable } From 3a028f956f99a32f59f92ab764282409f1c969b6 Mon Sep 17 00:00:00 2001 From: Demi Obenour <48690212+DemiMarie-parity@users.noreply.github.com> Date: Thu, 24 Oct 2019 14:44:56 -0400 Subject: [PATCH 021/359] Upgrade dependencies (#239) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 
Upgrade dependencies The hard work was done by `cargo upgrade`. Split out by request of @niklasad1. * try to fix compile errors * fix silly wrong import * Don’t require users of `uint` to import `rand` unless they use `rand` themselves. * Remove spurious version bump and extern crate * Fix silly bogus import path * Don’t bump crate versions unnecessarily Co-Authored-By: Andronik Ordian * Don’t explicitly depend on `rand` * Revert "Don’t explicitly depend on `rand`" This reverts commit c1cd0f6c0c12bfccac10cbcc834098a052c8c2cb. * Avoid depending explicitly on `rand`. * Bump `send_wrapper` * Fix silly mistake in tests as pointed out by @ordin. --- contract-address/Cargo.toml | 2 +- ethbloom/Cargo.toml | 12 ++++++------ ethereum-types/Cargo.toml | 2 +- fixed-hash/Cargo.toml | 12 ++++++------ keccak-hash/Cargo.toml | 6 +++--- kvdb-memorydb/Cargo.toml | 2 +- kvdb-rocksdb/Cargo.toml | 20 ++++++++++---------- kvdb-web/Cargo.toml | 14 +++++++------- kvdb/Cargo.toml | 4 ++-- parity-bytes/Cargo.toml | 2 +- parity-crypto/Cargo.toml | 18 +++++++++--------- parity-path/Cargo.toml | 2 +- parity-util-mem/Cargo.toml | 26 +++++++++++++------------- plain_hasher/Cargo.toml | 6 +++--- primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/codec/Cargo.toml | 4 ++-- primitive-types/impls/serde/Cargo.toml | 6 +++--- rlp/Cargo.toml | 8 ++++---- trace-time/Cargo.toml | 4 ++-- transaction-pool/Cargo.toml | 8 ++++---- triehash/Cargo.toml | 16 ++++++++-------- uint/Cargo.toml | 18 ++++++++++-------- uint/src/lib.rs | 6 +++++- uint/src/uint.rs | 7 ++++--- uint/tests/uint_tests.rs | 2 +- 25 files changed, 109 insertions(+), 102 deletions(-) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 83cd3ed13..8b4f05682 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -11,7 +11,7 @@ edition = "2018" readme = "README.md" [dependencies] -ethereum-types = { version = "0.8", path = "../ethereum-types" } +ethereum-types = { version = "0.8.0", path = 
"../ethereum-types" } rlp = { version = "0.4", path = "../rlp" } keccak-hash = { version = "0.4", path = "../keccak-hash", default-features = false } diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 829e2ac73..b65c88813 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.8.0" +version = "0.8.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT" @@ -10,16 +10,16 @@ repository = "https://github.com/paritytech/parity-common" edition = "2018" [dependencies] -tiny-keccak = "1.5" -crunchy = { version = "0.2", default-features = false, features = ["limit_256"] } +tiny-keccak = "1.5.0" +crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } [dev-dependencies] -criterion = "0.3" -rand = "0.7" -hex-literal = "0.2" +criterion = "0.3.0" +rand = "0.7.2" +hex-literal = "0.2.1" [features] default = ["std", "serialize", "libc", "rustc-hex"] diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index ae5550d23..eb1a85e20 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -16,7 +16,7 @@ impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } [dev-dependencies] -serde_json = "1.0" +serde_json = "1.0.41" [features] default = ["std", "serialize"] diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index e494fedd2..8e1bcfb16 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.5.0" +version = "0.5.1" authors = ["Parity 
Technologies "] license = "MIT" homepage = "https://github.com/paritytech/parity-common" @@ -14,17 +14,17 @@ edition = "2018" features = ["quickcheck", "api-dummy"] [dependencies] -rand = { version = "0.7", optional = true, default-features = false } -rustc-hex = { version = "2.0", optional = true, default-features = false } -quickcheck = { version = "0.9", optional = true } -byteorder = { version = "1.2", optional = true, default-features = false } +byteorder = { version = "1.3.2", optional = true, default-features = false } +quickcheck = { version = "0.9.0", optional = true } +rand = { version = "0.7.2", optional = true, default-features = false } +rustc-hex = { version = "2.0.1", optional = true, default-features = false } static_assertions = "1.0.0" [dev-dependencies] rand_xorshift = "0.2.0" [target.'cfg(not(target_os = "unknown"))'.dependencies] -libc = { version = "0.2", optional = true, default-features = false } +libc = { version = "0.2.65", optional = true, default-features = false } [features] default = ["std", "libc", "rand", "rustc-hex", "byteorder"] diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 365ea5805..d90da7964 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.4.0" +version = "0.4.1" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." 
authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -9,11 +9,11 @@ license = "GPL-3.0" edition = "2018" [dependencies] -tiny-keccak = "1.4" +tiny-keccak = "1.5.0" primitive-types = { path = "../primitive-types", version = "0.6", default-features = false } [dev-dependencies] -tempdir = "0.3" +tempdir = "0.3.7" [features] default = ["std"] diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 22312e8c0..4ee909d4a 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -8,5 +8,5 @@ license = "GPL-3.0" edition = "2018" [dependencies] -parking_lot = "0.9" +parking_lot = "0.9.0" kvdb = { version = "0.1", path = "../kvdb" } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 76b2ed9ea..cda3cc24b 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.1.5" +version = "0.1.6" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by rocksDB" @@ -8,16 +8,16 @@ license = "GPL-3.0" edition = "2018" [dependencies] -elastic-array = "0.10" +elastic-array = "0.10.2" fs-swap = "0.2.4" -interleaved-ordered = "0.1.0" +interleaved-ordered = "0.1.1" kvdb = { version = "0.1", path = "../kvdb" } -log = "0.4" -num_cpus = "1.10" -parking_lot = "0.9" -regex = "1.3" -parity-rocksdb = "0.5" +log = "0.4.8" +num_cpus = "1.10.1" +parking_lot = "0.9.0" +regex = "1.3.1" +parity-rocksdb = "0.5.1" [dev-dependencies] -tempdir = "0.3" -ethereum-types = { version = "0.8", path = "../ethereum-types" } +tempdir = "0.3.7" +ethereum-types = { version = "0.8.0", path = "../ethereum-types" } diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 0774f586d..aaf237da8 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.1.0" +version = "0.1.1" authors = ["Parity Technologies "] repository = 
"https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -9,16 +9,16 @@ license = "GPL-3.0" edition = "2018" [dependencies] -wasm-bindgen = "0.2.49" -js-sys = "0.3.26" +wasm-bindgen = "0.2.51" +js-sys = "0.3.28" kvdb = { version = "0.1", path = "../kvdb" } kvdb-memorydb = { version = "0.1", path = "../kvdb-memorydb" } -futures-preview = "0.3.0-alpha.18" +futures-preview = "0.3.0-alpha.19" log = "0.4.8" -send_wrapper = "0.2.0" +send_wrapper = "0.3.0" [dependencies.web-sys] -version = "0.3.26" +version = "0.3.28" features = [ 'console', 'Window', @@ -38,6 +38,6 @@ features = [ [dev-dependencies] wasm-bindgen-test = "0.2.49" -futures-preview = { version = "0.3.0-alpha.18", features = ['compat'] } +futures-preview = { version = "0.3.0-alpha.19", features = ['compat'] } futures01 = { package = "futures", version = "0.1" } console_log = "0.1.2" diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index e0306dfe2..db7badb98 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.1.0" +version = "0.1.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -8,5 +8,5 @@ license = "GPL-3.0" edition = "2018" [dependencies] -elastic-array = "0.10" +elastic-array = "0.10.2" bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } diff --git a/parity-bytes/Cargo.toml b/parity-bytes/Cargo.toml index 38be22ced..e49adce31 100644 --- a/parity-bytes/Cargo.toml +++ b/parity-bytes/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-bytes" -version = "0.1.0" +version = "0.1.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "byte utilities for Parity" diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index a4f61c5fe..c91abd777 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -14,30 +14,30 @@ 
harness = false required-features = ["publickey"] [dependencies] -tiny-keccak = "1.4" +tiny-keccak = "1.5.0" +scrypt = { version = "0.2.0", default-features = false } parity-secp256k1 = { version = "0.7.0", optional = true } ethereum-types = { version = "0.8.0", optional = true } lazy_static = { version = "1.0", optional = true } -scrypt = { version = "0.2", default-features = false } ripemd160 = "0.8.0" sha2 = "0.8.0" -digest = "0.8" -hmac = "0.7" +digest = "0.8.1" +hmac = "0.7.1" aes = "0.3.2" aes-ctr = "0.3.0" block-modes = "0.3.3" pbkdf2 = "0.3.0" +subtle = "2.2.1" +zeroize = { version = "1.0.0", default-features = false } rand = "0.7.2" rustc-hex = "2.0" -subtle = "2.1" -zeroize = "0.9.1" [dev-dependencies] -criterion = "0.2" -hex-literal = "0.2" +criterion = "0.3.0" +hex-literal = "0.2.1" [features] default = [] # public key crypto utils # moved from ethkey module in parity ethereum repository -publickey = ["parity-secp256k1", "lazy_static", "ethereum-types"] \ No newline at end of file +publickey = ["parity-secp256k1", "lazy_static", "ethereum-types"] diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml index b19b176c4..a4096fa73 100644 --- a/parity-path/Cargo.toml +++ b/parity-path/Cargo.toml @@ -8,4 +8,4 @@ license = "GPL-3.0" edition = "2018" [dependencies] -home = "0.5" +home = "0.5.1" diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index a73d23c0a..cf3dab6f5 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.2.0" +version = "0.2.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -8,27 +8,27 @@ license = "GPL-3.0" edition = "2018" [dependencies] -cfg-if = "0.1.6" -malloc_size_of_derive = "0.1.0" -dlmalloc = { version = "0.1", features = ["global"], optional = true } -wee_alloc = { version = "0.4", optional = true } +cfg-if = "0.1.10" 
+malloc_size_of_derive = "0.1.1" +dlmalloc = { version = "0.1.3", features = ["global"], optional = true } +wee_alloc = { version = "0.4.5", optional = true } # from https://github.com/microsoft/mimalloc: # mimalloc can be built in secure mode, # adding guard pages, randomized allocation, encrypted free lists, etc. # to protect against various heap vulnerabilities. # The performance penalty is only around 3% on average over our benchmarks. -mimallocator = { version = "0.1", features = ["secure"], optional = true } -mimalloc-sys = { version = "0.1", optional = true } +mimallocator = { version = "0.1.3", features = ["secure"], optional = true } +mimalloc-sys = { version = "0.1.6", optional = true } -elastic-array = { version = "0", optional = true } -ethereum-types = { version = "0", optional = true } -parking_lot = { version = "0", optional = true } +elastic-array = { version = "0.10.2", optional = true } +ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } +parking_lot = { version = "0.9.0", optional = true } -[target.'cfg(target_os = "windows")'.dependencies.winapi] -version = "0.3.4" +[target.'cfg(target_os = "windows")'.dependencies] +winapi = "0.3.8" [target.'cfg(not(target_os = "windows"))'.dependencies.jemallocator] -version = "0.1" +version = "0.3.2" optional = true [features] diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml index 2228b4a91..2a7f94989 100644 --- a/plain_hasher/Cargo.toml +++ b/plain_hasher/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "plain_hasher" description = "Hasher for 32-byte keys." 
-version = "0.2.1" +version = "0.2.2" authors = ["Parity Technologies "] license = "MIT" keywords = ["hash", "hasher"] @@ -10,10 +10,10 @@ categories = ["no-std"] edition = "2018" [dependencies] -crunchy = { version = "0.2", default-features = false } +crunchy = { version = "0.2.2", default-features = false } [dev-dependencies] -criterion = "0.3" +criterion = "0.3.0" [features] default = ["std"] diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index a73b00644..46fa7317e 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.6.0" +version = "0.6.1" authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" @@ -8,7 +8,7 @@ description = "Primitive types shared by Ethereum and Substrate" [dependencies] fixed-hash = { version = "0.5", path = "../fixed-hash", default-features = false } -uint = { version = "0.8", path = "../uint", default-features = false } +uint = { version = "0.8.1", path = "../uint", default-features = false } impl-serde = { version = "0.2.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index cc35994f1..d4527aa48 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "impl-codec" -version = "0.4.1" +version = "0.4.2" authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" description = "Parity Codec serialization support for uint and fixed hash." 
[dependencies] -parity-scale-codec = { version = "1.0.3", default-features = false } +parity-scale-codec = { version = "1.0.6", default-features = false } [features] default = ["std"] diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 8f2048c9f..0e2342f34 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -8,12 +8,12 @@ homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." [dependencies] -serde = "1.0" +serde = "1.0.101" [dev-dependencies] criterion = "0.3.0" -serde_derive = "1.0" -serde_json = "1.0" +serde_derive = "1.0.101" +serde_json = "1.0.41" uint = "0.8.1" [[bench]] diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 83900e213..0fd6e9b5b 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.4.2" +version = "0.4.3" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT/Apache-2.0" @@ -8,11 +8,11 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -rustc-hex = { version = "2.0", default-features = false } +rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] -criterion = "0.3" -hex-literal = "0.2" +criterion = "0.3.0" +hex-literal = "0.2.1" primitive-types = { path = "../primitive-types", version = "0.6", features = ["impl-rlp"] } [features] diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml index 5dad32a52..f1ec6e9a1 100644 --- a/trace-time/Cargo.toml +++ b/trace-time/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "trace-time" description = "Easily trace time to execute a scope." 
-version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" license = "GPL-3.0" edition = "2018" [dependencies] -log = "0.4" +log = "0.4.8" diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index fb442756c..844607efa 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -1,16 +1,16 @@ [package] description = "Generic transaction pool." name = "transaction-pool" -version = "2.0.1" +version = "2.0.2" license = "GPL-3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" edition = "2018" [dependencies] -log = "0.4" -smallvec = "0.6" +log = "0.4.8" +smallvec = "0.6.10" trace-time = { path = "../trace-time", version = "0.1" } [dev-dependencies] -ethereum-types = { version = "0.8", path = "../ethereum-types" } +ethereum-types = { version = "0.8.0", path = "../ethereum-types" } diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index ff2133d13..6342f7d03 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "triehash" -version = "0.8.0" +version = "0.8.1" authors = ["Parity Technologies "] description = "In-memory patricia trie operations" repository = "https://github.com/paritytech/parity-common" @@ -8,16 +8,16 @@ license = "GPL-3.0" edition = "2018" [dependencies] -hash-db = "0.15" +hash-db = "0.15.2" rlp = { version = "0.4", path = "../rlp" } [dev-dependencies] -criterion = "0.3" -keccak-hasher = "0.15" -ethereum-types = { version = "0.8", path = "../ethereum-types" } -tiny-keccak = "1.5" -trie-standardmap = "0.15" -hex-literal = "0.2" +criterion = "0.3.0" +keccak-hasher = "0.15.2" +ethereum-types = { version = "0.8.0", path = "../ethereum-types" } +tiny-keccak = "1.5.0" +trie-standardmap = "0.15.2" +hex-literal = "0.2.1" [[bench]] name = "triehash" diff --git a/uint/Cargo.toml b/uint/Cargo.toml index cf6a88d03..3a61567a6 100644 --- a/uint/Cargo.toml +++ 
b/uint/Cargo.toml @@ -4,21 +4,23 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT/Apache-2.0" name = "uint" -version = "0.8.1" +version = "0.8.2" authors = ["Parity Technologies "] readme = "README.md" edition = "2018" [dependencies] -byteorder = { version = "1", default-features = false } -rustc-hex = { version = "2.0", default-features = false } -quickcheck = { version = "0.6", optional = true } -crunchy = { version = "0.2", default-features = true } +byteorder = { version = "1.3.2", default-features = false } +crunchy = { version = "0.2.2", default-features = false } +qc = { package = "quickcheck", version = "0.9.0", optional = true } +rand = { version = "0.7.2", default-features = false, optional = true } +rustc-hex = { version = "2.0.1", default-features = false } static_assertions = "1.0.0" [features] default = ["std"] std = ["byteorder/std", "rustc-hex/std", "crunchy/std"] +quickcheck = ["qc", "rand"] [[example]] name = "modular" @@ -28,11 +30,11 @@ name = "uint_tests" required-features = ["std"] [dev-dependencies] -criterion = "0.2.11" -num-bigint = "0.2" +criterion = "0.3.0" +num-bigint = "0.2.3" [target.'cfg(unix)'.dev-dependencies] -rug = { version = "1.4", default-features = false, features = ["integer"] } +rug = { version = "1.6.0", default-features = false, features = ["integer"] } [[bench]] name = "bigint" diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 1ef540d85..e72c32d5f 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -23,7 +23,11 @@ pub use rustc_hex; #[cfg(feature="quickcheck")] #[doc(hidden)] -pub use quickcheck; +pub use qc; + +#[cfg(feature="quickcheck")] +#[doc(hidden)] +pub use rand; #[doc(hidden)] pub use static_assertions; diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 49dfcf07f..274cc529c 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1576,11 +1576,12 @@ macro_rules! impl_std_for_uint { #[doc(hidden)] macro_rules! 
impl_quickcheck_arbitrary_for_uint { ($uint: ty, $n_bytes: tt) => { - impl $crate::quickcheck::Arbitrary for $uint { - fn arbitrary(g: &mut G) -> Self { + impl $crate::qc::Arbitrary for $uint { + fn arbitrary(g: &mut G) -> Self { let mut res = [0u8; $n_bytes]; - let p = g.next_f64(); + use $crate::rand::Rng; + let p: f64 = $crate::rand::rngs::OsRng.gen(); // make it more likely to generate smaller numbers that // don't use up the full $n_bytes let range = diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 7f36b6a65..0dedc0723 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1102,7 +1102,7 @@ pub mod laws { macro_rules! uint_laws { ($mod_name:ident, $uint_ty:ident) => { mod $mod_name { - use quickcheck::{TestResult, quickcheck}; + use qc::{TestResult, quickcheck}; use super::$uint_ty; quickcheck! { From 9f15647d4d058db0eba0a895b36695630689c50f Mon Sep 17 00:00:00 2001 From: Bryan Stitt Date: Thu, 24 Oct 2019 11:55:14 -0700 Subject: [PATCH 022/359] export FromDecStrErr (#244) * export FromDecStrErr My project needs to be able to convert from uint's error type to my project's error type. This should make it possible to do this without needing to add uint as a direct dependency of my project. 
* export FromDecStrErr --- ethereum-types/src/lib.rs | 2 +- ethereum-types/src/uint.rs | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index b491fd37b..dbbb42ad0 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -5,7 +5,7 @@ mod hash; mod uint; -pub use uint::{U64, U128, U256, U512}; +pub use uint::{U64, U128, U256, U512, FromDecStrErr}; pub use hash::{BigEndianHash, H32, H64, H128, H160, H256, H264, H512, H520}; pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index dd92f536f..258c52f20 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -3,6 +3,8 @@ use impl_rlp::impl_uint_rlp; #[cfg(feature="serialize")] use impl_serde::impl_uint_serde; +pub use uint_crate::FromDecStrErr; + construct_uint! { /// Unsigned 64-bit integer. pub struct U64(1); From f2425fa536baca578a31e0797fe82fb38833b29c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 26 Oct 2019 21:02:50 +0200 Subject: [PATCH 023/359] Release impl-rlp 0.2.1 (#250) --- primitive-types/impls/rlp/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index f2de4e8ba..6f6f469cd 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-rlp" -version = "0.2.0" +version = "0.2.1" authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" From c3e8d85c49d922c11bee8969e68f0c6a9ff5b745 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 29 Oct 2019 15:29:10 +0100 Subject: [PATCH 024/359] Fix `impl-serde::serializa/_raw` for empty slices (#253) --- primitive-types/impls/serde/src/serialize.rs | 30 +++++++++++++++----- 1 file changed, 23 insertions(+), 7 deletions(-) diff 
--git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index cc8da52bb..67db57b6d 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -40,7 +40,11 @@ fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result where S: Serializer, { - serializer.serialize_str(to_hex(slice, bytes, false)) + if bytes.is_empty() { + serializer.serialize_str("0x") + } else { + serializer.serialize_str(to_hex(slice, bytes, false)) + } } /// Serializes a slice of bytes. @@ -48,7 +52,7 @@ pub fn serialize(bytes: &[u8], serializer: S) -> Result wher S: Serializer, { let mut slice = vec![0u8; (bytes.len() + 1) * 2]; - serializer.serialize_str(to_hex(&mut *slice, bytes, false)) + serialize_raw(&mut slice, bytes, serializer) } /// Serialize a slice of bytes as uint. @@ -60,10 +64,10 @@ pub fn serialize_uint(slice: &mut [u8], bytes: &[u8], serializer: S) -> Resul let non_zero = bytes.iter().take_while(|b| **b == 0).count(); let bytes = &bytes[non_zero..]; if bytes.is_empty() { - return serializer.serialize_str("0x0"); + serializer.serialize_str("0x0") + } else { + serializer.serialize_str(to_hex(slice, bytes, true)) } - - serializer.serialize_str(to_hex(slice, bytes, true)) } /// Expected length of bytes vector. 
@@ -222,9 +226,9 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) mod tests { extern crate serde_derive; - use self::serde_derive::Deserialize; + use self::serde_derive::{Serialize, Deserialize}; - #[derive(Deserialize)] + #[derive(Serialize, Deserialize)] struct Bytes(#[serde(with="super")] Vec); #[test] @@ -255,4 +259,16 @@ mod tests { assert_eq!(b.0.len(), 32); assert_eq!(c.0.len(), 32); } + + #[test] + fn should_serialize_and_deserialize_empty_bytes() { + let bytes = Bytes(Vec::new()); + + let data = serde_json::to_string(&bytes).unwrap(); + + assert_eq!("\"0x\"", &data); + + let deserialized: Bytes = serde_json::from_str(&data).unwrap(); + assert!(deserialized.0.is_empty()) + } } From b2ac3860d5d0739327ed67f63f42ea51b27657f1 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 29 Oct 2019 19:47:02 +0100 Subject: [PATCH 025/359] impl-serde: bump to 0.2.3 (#254) --- primitive-types/impls/serde/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 0e2342f34..dc01bc6ab 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.2.2" +version = "0.2.3" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0/MIT" From a9aa5893d354779af4ae9591bc93686cb1d72268 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 30 Oct 2019 11:18:10 +0100 Subject: [PATCH 026/359] add CONTRIBUTING guidelines and initial changelogs (#249) * add CONTRIBUTING guidelines and initial changelogs * Update CONTRIBUTING.md Co-Authored-By: David * [CONTRIBUTING] address some of the review comments * [CONTRIBUTING] mention our code style * rustfmt (sorry not sorry) * unscrew uint macros fmt * rustfmt: max width 120 * rustfmt: use_small_heuristics = "Max" * fix CI * argh * CONTRIBUTING: mention gitter * impl-serde: bump to 0.2.3 * 
Revert "impl-serde: bump to 0.2.3" This reverts commit 3fd73cbc6a61afedebd3118445dabbfaea778714. * fmt * impl-serde: update changelog for 0.2.3 --- .travis.yml | 4 + CONTRIBUTING.md | 55 ++ contract-address/CHANGELOG.md | 7 + contract-address/src/lib.rs | 153 ++--- ethbloom/CHANGELOG.md | 11 + ethbloom/benches/bloom.rs | 5 +- ethbloom/benches/unrolling.rs | 2 +- ethbloom/src/lib.rs | 26 +- ethereum-types/CHANGELOG.md | 9 + ethereum-types/src/hash.rs | 48 +- ethereum-types/src/lib.rs | 6 +- ethereum-types/src/uint.rs | 54 +- ethereum-types/tests/serde.rs | 22 +- fixed-hash/CHANGELOG.md | 11 + fixed-hash/src/hash.rs | 51 +- fixed-hash/src/lib.rs | 6 +- fixed-hash/src/tests.rs | 56 +- keccak-hash/CHANGELOG.md | 11 + keccak-hash/benches/keccak_256.rs | 32 +- keccak-hash/src/lib.rs | 55 +- kvdb-memorydb/CHANGELOG.md | 7 + kvdb-memorydb/src/lib.rs | 41 +- kvdb-rocksdb/CHANGELOG.md | 12 + kvdb-rocksdb/src/lib.rs | 229 ++++--- kvdb-web/CHANGELOG.md | 11 + kvdb-web/src/error.rs | 7 +- kvdb-web/src/indexed_db.rs | 61 +- kvdb-web/src/lib.rs | 130 ++-- kvdb-web/tests/indexed_db.rs | 47 +- kvdb/CHANGELOG.md | 13 + kvdb/src/lib.rs | 50 +- parity-bytes/CHANGELOG.md | 13 + parity-bytes/src/lib.rs | 10 +- parity-crypto/CHANGELOG.md | 7 + parity-crypto/benches/bench.rs | 24 +- parity-crypto/src/aes.rs | 108 ++- parity-crypto/src/digest.rs | 31 +- parity-crypto/src/error.rs | 7 +- parity-crypto/src/hmac/mod.rs | 60 +- parity-crypto/src/hmac/test.rs | 142 ++-- parity-crypto/src/lib.rs | 15 +- parity-crypto/src/pbkdf2/test.rs | 11 +- parity-crypto/src/publickey/ec_math_utils.rs | 34 +- parity-crypto/src/publickey/ecdh.rs | 5 +- .../src/publickey/ecdsa_signature.rs | 56 +- parity-crypto/src/publickey/ecies.rs | 6 +- parity-crypto/src/publickey/error.rs | 2 +- parity-crypto/src/publickey/extended_keys.rs | 192 +++--- parity-crypto/src/publickey/keypair.rs | 18 +- .../src/publickey/keypair_generator.rs | 5 +- parity-crypto/src/publickey/mod.rs | 14 +- 
parity-crypto/src/publickey/secret_key.rs | 43 +- parity-crypto/src/scrypt.rs | 17 +- parity-path/CHANGELOG.md | 7 + parity-path/src/lib.rs | 11 +- parity-util-mem/CHANGELOG.md | 11 + parity-util-mem/src/allocators.rs | 109 ++- parity-util-mem/src/impls.rs | 37 +- parity-util-mem/src/lib.rs | 21 +- parity-util-mem/src/malloc_size.rs | 630 +++++++++--------- parity-util-mem/src/sizeof.rs | 16 +- plain_hasher/CHANGELOG.md | 12 + plain_hasher/benches/bench.rs | 32 +- primitive-types/CHANGELOG.md | 11 + primitive-types/impls/codec/CHANGELOG.md | 7 + primitive-types/impls/codec/src/lib.rs | 15 +- primitive-types/impls/rlp/CHANGELOG.md | 7 + primitive-types/impls/rlp/src/lib.rs | 4 +- primitive-types/impls/serde/CHANGELOG.md | 11 + .../impls/serde/benches/impl_serde.rs | 48 +- primitive-types/impls/serde/benches/input.rs | 3 +- primitive-types/impls/serde/src/lib.rs | 28 +- primitive-types/impls/serde/src/serialize.rs | 49 +- rlp/CHANGELOG.md | 15 + rlp/benches/rlp.rs | 129 ++-- rlp/src/impls.rs | 74 +- rlp/src/lib.rs | 27 +- rlp/src/rlpin.rs | 95 ++- rlp/src/stream.rs | 45 +- rlp/tests/tests.rs | 222 +++--- rustfmt.toml | 3 + trace-time/CHANGELOG.md | 12 + trace-time/src/lib.rs | 5 +- transaction-pool/CHANGELOG.md | 12 + transaction-pool/src/error.rs | 20 +- transaction-pool/src/lib.rs | 6 +- transaction-pool/src/listener.rs | 8 +- transaction-pool/src/options.rs | 6 +- transaction-pool/src/pool.rs | 179 +++-- transaction-pool/src/ready.rs | 8 +- transaction-pool/src/replace.rs | 40 +- transaction-pool/src/scoring.rs | 25 +- transaction-pool/src/tests/helpers.rs | 23 +- transaction-pool/src/tests/mod.rs | 219 ++---- transaction-pool/src/tests/tx_builder.rs | 6 +- transaction-pool/src/transactions.rs | 66 +- triehash/CHANGELOG.md | 12 + triehash/benches/triehash.rs | 2 +- triehash/src/lib.rs | 57 +- uint/CHANGELOG.md | 16 + uint/benches/bigint.rs | 115 +--- uint/examples/modular.rs | 9 +- uint/src/lib.rs | 5 +- uint/src/uint.rs | 138 ++-- uint/tests/uint_tests.rs | 354 
+++++----- 105 files changed, 2579 insertions(+), 2495 deletions(-) create mode 100644 CONTRIBUTING.md create mode 100644 contract-address/CHANGELOG.md create mode 100644 ethbloom/CHANGELOG.md create mode 100644 ethereum-types/CHANGELOG.md create mode 100644 fixed-hash/CHANGELOG.md create mode 100644 keccak-hash/CHANGELOG.md create mode 100644 kvdb-memorydb/CHANGELOG.md create mode 100644 kvdb-rocksdb/CHANGELOG.md create mode 100644 kvdb-web/CHANGELOG.md create mode 100644 kvdb/CHANGELOG.md create mode 100644 parity-bytes/CHANGELOG.md create mode 100644 parity-crypto/CHANGELOG.md create mode 100644 parity-path/CHANGELOG.md create mode 100644 parity-util-mem/CHANGELOG.md create mode 100644 plain_hasher/CHANGELOG.md create mode 100644 primitive-types/CHANGELOG.md create mode 100644 primitive-types/impls/codec/CHANGELOG.md create mode 100644 primitive-types/impls/rlp/CHANGELOG.md create mode 100644 primitive-types/impls/serde/CHANGELOG.md create mode 100644 rlp/CHANGELOG.md create mode 100644 rustfmt.toml create mode 100644 trace-time/CHANGELOG.md create mode 100644 transaction-pool/CHANGELOG.md create mode 100644 triehash/CHANGELOG.md create mode 100644 uint/CHANGELOG.md diff --git a/.travis.yml b/.travis.yml index 5bedd7f7d..2d48dc81b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,10 @@ matrix: include: - os: linux rust: stable + before_script: + - rustup component add rustfmt + after_script: + - cargo fmt -- --check - os: linux rust: beta - os: linux diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..bbe6e0c0e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing to parity-common + +parity-common welcomes contribution from everyone in the form of suggestions, bug +reports, pull requests, and feedback. This document gives some guidance if you +are thinking of helping us. + +Please reach out here in a GitHub issue or in the parity channel on [gitter] if we can do anything to help you contribute. 
+ +[gitter]: https://gitter.im/paritytech/parity + +## Submitting bug reports and feature requests + +When reporting a bug or asking for help, please include enough details so that +the people helping you can reproduce the behavior you are seeing. For some tips +on how to approach this, read about how to produce a [Minimal, Complete, and +Verifiable example]. + +[Minimal, Complete, and Verifiable example]: https://stackoverflow.com/help/mcve + +When making a feature request, please make it clear what problem you intend to +solve with the feature, any ideas for how parity-common could support solving that problem, any possible alternatives, and any disadvantages. + +## Versioning + +As many crates in the rust ecosystem, all crates in parity-common follow [semantic versioning]. This means bumping PATCH version on bug fixes that don't break backwards compatibility, MINOR version on new features and MAJOR version otherwise (MAJOR.MINOR.PATCH). Versions < 1.0 are considered to have the format 0.MAJOR.MINOR, which means bumping MINOR version for all non-breaking changes. + +If you bump a dependency that is publicly exposed in a crate's API (e.g. `pub use dependency;` or `pub field: dependency::Dependency`) and the version transition for the dependency was semver-breaking, then it is considered to be a breaking change for the consuming crate as well. To put it simply, if your change could cause a compilation error in user's code, it is a breaking change. + +Bumping versions should be done in a separate from regular code changes PR. + +[semantic versioning]: https://semver.org/ + +## Releasing a new version + +This part of the guidelines is for parity-common maintainers. 
+ +When making a new release make sure to follow these steps: +* Submit a PR with a version bump and list all major and breaking changes in the crate's changelog + +After the PR is merged into master: +* `cargo publish` on the latest master (try with `--dry-run` first) +* Add a git tag in format `-v`, +e.g. `git tag impl-serde-v0.2.2` and push it with `git push origin impl-serde-v0.2.2` + +## Conduct + +We follow [Substrate Code of Conduct]. + +[Substrate Code of Conduct]: https://github.com/paritytech/substrate/blob/master/CODE_OF_CONDUCT.adoc + +## Attribution + +This guideline is adapted from [Serde's CONTRIBUTING guide]. + +[Serde's CONTRIBUTING guide]: https://github.com/serde-rs/serde/blob/master/CONTRIBUTING.md diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md new file mode 100644 index 000000000..927c9dc9c --- /dev/null +++ b/contract-address/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/contract-address/src/lib.rs b/contract-address/src/lib.rs index ea4050316..1d4068d7d 100644 --- a/contract-address/src/lib.rs +++ b/contract-address/src/lib.rs @@ -27,95 +27,88 @@ use std::ops::Deref; pub struct ContractAddress(Address); impl ContractAddress { - /// Computes the address of a contract from the sender's address and the transaction nonce - pub fn from_sender_and_nonce(sender: &Address, nonce: &U256) -> Self { - let mut stream = RlpStream::new_list(2); - stream.append(sender); - stream.append(nonce); - - ContractAddress(Address::from(keccak(stream.as_raw()))) - } - - /// Computes the address of a contract from the sender's address, the salt and code hash - /// - /// pWASM `create2` scheme and EIP-1014 CREATE2 scheme - pub fn from_sender_salt_and_code(sender: &Address, salt: H256, code_hash: H256) -> Self { - let mut buffer = [0u8; 1 + 20 + 32 + 32]; - buffer[0] = 0xff; - &mut buffer[1..(1 + 
20)].copy_from_slice(&sender[..]); - &mut buffer[(1 + 20)..(1 + 20 + 32)].copy_from_slice(&salt[..]); - &mut buffer[(1 + 20 + 32)..].copy_from_slice(&code_hash[..]); - - ContractAddress(Address::from(keccak(&buffer[..]))) - } - - /// Computes the address of a contract from the sender's address and the code hash - /// - /// Used by pwasm create ext. - pub fn from_sender_and_code(sender: &Address, code_hash: H256) -> Self { - let mut buffer = [0u8; 20 + 32]; - &mut buffer[..20].copy_from_slice(&sender[..]); - &mut buffer[20..].copy_from_slice(&code_hash[..]); - - ContractAddress(Address::from(keccak(&buffer[..]))) - } + /// Computes the address of a contract from the sender's address and the transaction nonce + pub fn from_sender_and_nonce(sender: &Address, nonce: &U256) -> Self { + let mut stream = RlpStream::new_list(2); + stream.append(sender); + stream.append(nonce); + + ContractAddress(Address::from(keccak(stream.as_raw()))) + } + + /// Computes the address of a contract from the sender's address, the salt and code hash + /// + /// pWASM `create2` scheme and EIP-1014 CREATE2 scheme + pub fn from_sender_salt_and_code(sender: &Address, salt: H256, code_hash: H256) -> Self { + let mut buffer = [0u8; 1 + 20 + 32 + 32]; + buffer[0] = 0xff; + &mut buffer[1..(1 + 20)].copy_from_slice(&sender[..]); + &mut buffer[(1 + 20)..(1 + 20 + 32)].copy_from_slice(&salt[..]); + &mut buffer[(1 + 20 + 32)..].copy_from_slice(&code_hash[..]); + + ContractAddress(Address::from(keccak(&buffer[..]))) + } + + /// Computes the address of a contract from the sender's address and the code hash + /// + /// Used by pwasm create ext. 
+ pub fn from_sender_and_code(sender: &Address, code_hash: H256) -> Self { + let mut buffer = [0u8; 20 + 32]; + &mut buffer[..20].copy_from_slice(&sender[..]); + &mut buffer[20..].copy_from_slice(&code_hash[..]); + + ContractAddress(Address::from(keccak(&buffer[..]))) + } } impl Deref for ContractAddress { - type Target = Address; + type Target = Address; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl From for Address { - fn from(contract_address: ContractAddress) -> Self { - contract_address.0 - } + fn from(contract_address: ContractAddress) -> Self { + contract_address.0 + } } #[cfg(test)] mod tests { - use super::*; - use std::str::FromStr; - - #[test] - fn test_from_sender_and_nonce() { - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let expected = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); - - let actual = ContractAddress::from_sender_and_nonce(&sender, &U256::from(88)); - - assert_eq!(Address::from(actual), expected); - } - - #[test] - fn test_from_sender_salt_and_code_hash() { - let sender = Address::zero(); - let code_hash = - H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") - .unwrap(); - let expected_address = - Address::from_str("e33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0").unwrap(); - - let contract_address = - ContractAddress::from_sender_salt_and_code(&sender, H256::zero(), code_hash); - - assert_eq!(Address::from(contract_address), expected_address); - } - - #[test] - fn test_from_sender_and_code_hash() { - let sender = Address::from_str("0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d").unwrap(); - let code_hash = - H256::from_str("d98f2e8134922f73748703c8e7084d42f13d2fa1439936ef5a3abcf5646fe83f") - .unwrap(); - let expected_address = - Address::from_str("064417880f5680b141ed7fcac031aad40df080b0").unwrap(); - - let contract_address = ContractAddress::from_sender_and_code(&sender, code_hash); - - 
assert_eq!(Address::from(contract_address), expected_address); - } + use super::*; + use std::str::FromStr; + + #[test] + fn test_from_sender_and_nonce() { + let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); + let expected = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); + + let actual = ContractAddress::from_sender_and_nonce(&sender, &U256::from(88)); + + assert_eq!(Address::from(actual), expected); + } + + #[test] + fn test_from_sender_salt_and_code_hash() { + let sender = Address::zero(); + let code_hash = H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap(); + let expected_address = Address::from_str("e33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0").unwrap(); + + let contract_address = ContractAddress::from_sender_salt_and_code(&sender, H256::zero(), code_hash); + + assert_eq!(Address::from(contract_address), expected_address); + } + + #[test] + fn test_from_sender_and_code_hash() { + let sender = Address::from_str("0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d").unwrap(); + let code_hash = H256::from_str("d98f2e8134922f73748703c8e7084d42f13d2fa1439936ef5a3abcf5646fe83f").unwrap(); + let expected_address = Address::from_str("064417880f5680b141ed7fcac031aad40df080b0").unwrap(); + + let contract_address = ContractAddress::from_sender_and_code(&sender, code_hash); + + assert_eq!(Address::from(contract_address), expected_address); + } } diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md new file mode 100644 index 000000000..f1da71a00 --- /dev/null +++ b/ethbloom/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.8.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/ethbloom/benches/bloom.rs b/ethbloom/benches/bloom.rs index 2231832ed..07f11a92f 100644 --- a/ethbloom/benches/bloom.rs +++ b/ethbloom/benches/bloom.rs @@ -21,8 +21,9 @@ fn test_bloom() -> Bloom { 00000000000000000000000000000000\ 00000000000000000000000000000000\ 00000000000000000000000000000000\ - 00000000000000000000000000000000" - ).unwrap() + 00000000000000000000000000000000", + ) + .unwrap() } fn test_topic() -> Vec { diff --git a/ethbloom/benches/unrolling.rs b/ethbloom/benches/unrolling.rs index e35a33270..5fd6883ce 100644 --- a/ethbloom/benches/unrolling.rs +++ b/ethbloom/benches/unrolling.rs @@ -48,7 +48,7 @@ fn bench_backwards(c: &mut Criterion) { b.iter(|| { let other_data = random_data(); for i in 0..255 { - data[255-i] |= other_data[255-i]; + data[255 - i] |= other_data[255 - i]; } }); }); diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 5cc5b8119..61afa629e 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -45,20 +45,20 @@ #![cfg_attr(not(feature = "std"), no_std)] -use core::{ops, mem}; +use core::{mem, ops}; use crunchy::unroll; use fixed_hash::*; +use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; -use impl_rlp::impl_fixed_hash_rlp; use tiny_keccak::keccak256; // 3 according to yellowpaper const BLOOM_BITS: u32 = 3; const BLOOM_SIZE: usize = 256; -construct_fixed_hash!{ +construct_fixed_hash! { /// Bloom hash type with 256 bytes (2048 bits) size. 
pub struct Bloom(BLOOM_SIZE); } @@ -139,7 +139,10 @@ impl Bloom { self.contains_bloom(&bloom) } - pub fn contains_bloom<'a, B>(&self, bloom: B) -> bool where BloomRef<'a>: From { + pub fn contains_bloom<'a, B>(&self, bloom: B) -> bool + where + BloomRef<'a>: From, + { let bloom_ref: BloomRef<'_> = bloom.into(); // workaround for https://github.com/rust-lang/rust/issues/43644 self.contains_bloom_ref(bloom_ref) @@ -182,7 +185,10 @@ impl Bloom { } } - pub fn accrue_bloom<'a, B>(&mut self, bloom: B) where BloomRef<'a>: From { + pub fn accrue_bloom<'a, B>(&mut self, bloom: B) + where + BloomRef<'a>: From, + { let bloom_ref: BloomRef<'_> = bloom.into(); assert_eq!(self.0.len(), BLOOM_SIZE); assert_eq!(bloom_ref.0.len(), BLOOM_SIZE); @@ -212,7 +218,10 @@ impl<'a> BloomRef<'a> { } #[allow(clippy::trivially_copy_pass_by_ref)] - pub fn contains_bloom<'b, B>(&self, bloom: B) -> bool where BloomRef<'b>: From { + pub fn contains_bloom<'b, B>(&self, bloom: B) -> bool + where + BloomRef<'b>: From, + { let bloom_ref: BloomRef<'_> = bloom.into(); assert_eq!(self.0.len(), BLOOM_SIZE); assert_eq!(bloom_ref.0.len(), BLOOM_SIZE); @@ -249,11 +258,12 @@ impl_fixed_hash_serde!(Bloom, BLOOM_SIZE); #[cfg(test)] mod tests { + use super::{Bloom, Input}; use core::str::FromStr; use hex_literal::hex; - use super::{Bloom, Input}; #[test] + #[rustfmt::skip] fn it_works() { let bloom = Bloom::from_str( "00000000000000000000000000000000\ @@ -271,7 +281,7 @@ mod tests { 00000000000000000000000000000000\ 00000000000000000000000000000000\ 00000000000000000000000000000000\ - 00000000000000000000000000000000" + 00000000000000000000000000000000", ).unwrap(); let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md new file mode 100644 index 000000000..6fad76681 --- /dev/null +++ b/ethereum-types/CHANGELOG.md @@ -0,0 +1,9 @@ +# Changelog + +The 
format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] +### Added +- uint error type is re-exported (https://github.com/paritytech/parity-common/pull/244) diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 9c04ee576..9fe385c5b 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -1,7 +1,7 @@ -use crate::{U64, U128, U256, U512}; +use crate::{U128, U256, U512, U64}; use fixed_hash::*; use impl_rlp::impl_fixed_hash_rlp; -#[cfg(feature="serialize")] +#[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; pub trait BigEndianHash { @@ -11,30 +11,35 @@ pub trait BigEndianHash { fn into_uint(&self) -> Self::Uint; } -construct_fixed_hash!{ pub struct H32(4); } +construct_fixed_hash! { pub struct H32(4); } impl_fixed_hash_rlp!(H32, 4); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H32, 4); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H32, 4); -construct_fixed_hash!{ pub struct H64(8); } +construct_fixed_hash! { pub struct H64(8); } impl_fixed_hash_rlp!(H64, 8); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H64, 8); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H64, 8); -construct_fixed_hash!{ pub struct H128(16); } +construct_fixed_hash! { pub struct H128(16); } impl_fixed_hash_rlp!(H128, 16); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H128, 16); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H128, 16); pub use primitive_types::H160; pub use primitive_types::H256; -construct_fixed_hash!{ pub struct H264(33); } +construct_fixed_hash! { pub struct H264(33); } impl_fixed_hash_rlp!(H264, 33); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H264, 33); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H264, 33); pub use primitive_types::H512; -construct_fixed_hash!{ pub struct H520(65); } +construct_fixed_hash! 
{ pub struct H520(65); } impl_fixed_hash_rlp!(H520, 65); -#[cfg(feature = "serialize")] impl_fixed_hash_serde!(H520, 65); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(H520, 65); macro_rules! impl_uint_conversions { ($hash: ident, $uint: ident) => { @@ -51,7 +56,7 @@ macro_rules! impl_uint_conversions { $uint::from(self.as_ref() as &[u8]) } } - } + }; } impl_uint_conversions!(H64, U64); @@ -91,7 +96,10 @@ mod tests { (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + ( + H256::from_low_u64_be(u64::max_value()), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), ]; for (number, expected) in tests { @@ -102,9 +110,15 @@ mod tests { #[test] fn test_serialize_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); 
assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index dbbb42ad0..9bc756ba0 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -1,13 +1,11 @@ #![cfg_attr(not(feature = "std"), no_std)] - - mod hash; mod uint; -pub use uint::{U64, U128, U256, U512, FromDecStrErr}; -pub use hash::{BigEndianHash, H32, H64, H128, H160, H256, H264, H512, H520}; pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; +pub use hash::{BigEndianHash, H128, H160, H256, H264, H32, H512, H520, H64}; +pub use uint::{FromDecStrErr, U128, U256, U512, U64}; pub type Address = H160; pub type Secret = H256; diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index 258c52f20..5cf35feae 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -1,7 +1,7 @@ -use uint_crate::*; use impl_rlp::impl_uint_rlp; -#[cfg(feature="serialize")] +#[cfg(feature = "serialize")] use impl_serde::impl_uint_serde; +use uint_crate::*; pub use uint_crate::FromDecStrErr; @@ -10,16 +10,16 @@ construct_uint! { pub struct U64(1); } impl_uint_rlp!(U64, 1); -#[cfg(feature = "serialize")] impl_uint_serde!(U64, 1); +#[cfg(feature = "serialize")] +impl_uint_serde!(U64, 1); pub use primitive_types::{U128, U256, U512}; - #[cfg(test)] mod tests { use super::{U256, U512}; - use std::u64::MAX; use serde_json as ser; + use std::u64::MAX; macro_rules! 
test_serialize { ($name: ident, $test_name: ident) => { @@ -51,7 +51,7 @@ mod tests { assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test_serialize!(U256, test_u256); @@ -63,9 +63,9 @@ mod tests { ser::to_string_pretty(&!U256::zero()).unwrap(), "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"").unwrap_err().is_data() - ); + assert!(ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") + .unwrap_err() + .is_data()); } #[test] @@ -110,58 +110,58 @@ mod tests { assert_eq!(U512([0, 27, 0, 0, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX-1, 0, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX - 1, 0, 0, 0, 0, 0, 0]), result); let result = U256([0, MAX, 0, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([0, 1, MAX-1, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([0, 1, MAX - 1, 0, 0, 0, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX, MAX-1, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX - 1, 0, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, MAX, MAX-1, 0, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX - 1, 0, 0, 0, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, 0, MAX-1, MAX, 0, 0, 0, 0]), result); + assert_eq!(U512([1, 0, MAX - 1, MAX, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, MAX, MAX, MAX-1, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX - 1, 0, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, 0, 0, 0])); - 
assert_eq!(U512([1, MAX, MAX, MAX-1, 0, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX - 1, 0, 0, 0, 0]), result); let result = U256([MAX, 0, 0, 0]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, MAX, MAX, MAX, MAX-1, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX, MAX - 1, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U512([1, MAX, MAX, MAX, MAX-1, 0, 0, 0]), result); + assert_eq!(U512([1, MAX, MAX, MAX, MAX - 1, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, 0, MAX, MAX-1, MAX, 0, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX - 1, MAX, 0, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, 0, MAX, MAX-1, MAX, 0, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX - 1, MAX, 0, 0, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U512([1, 0, MAX, MAX, MAX-1, MAX, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX, MAX - 1, MAX, 0, 0]), result); let result = U256([MAX, MAX, 0, 0]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, 0, MAX, MAX, MAX-1, MAX, 0, 0]), result); + assert_eq!(U512([1, 0, MAX, MAX, MAX - 1, MAX, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, 0, 0, MAX-1, MAX, MAX, 0, 0]), result); + assert_eq!(U512([1, 0, 0, MAX - 1, MAX, MAX, 0, 0]), result); let result = U256([MAX, MAX, MAX, 0]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, 0, 0, MAX, MAX-1, MAX, MAX, 0]), result); + assert_eq!(U512([1, 0, 0, MAX, MAX - 1, MAX, MAX, 0]), result); let result = U256([MAX, MAX, MAX, MAX]).full_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U512([1, 0, 0, MAX, MAX-1, MAX, MAX, 0]), result); + assert_eq!(U512([1, 0, 0, MAX, MAX - 1, MAX, MAX, 0]), result); let result = U256([MAX, MAX, MAX, 
MAX]).full_mul(U256([MAX, MAX, MAX, MAX])); - assert_eq!(U512([1, 0, 0, 0, MAX-1, MAX, MAX, MAX]), result); + assert_eq!(U512([1, 0, 0, 0, MAX - 1, MAX, MAX, MAX]), result); let result = U256([0, 0, 0, MAX]).full_mul(U256([0, 0, 0, MAX])); - assert_eq!(U512([0, 0, 0, 0, 0, 0, 1, MAX-1]), result); + assert_eq!(U512([0, 0, 0, 0, 0, 0, 1, MAX - 1]), result); let result = U256([1, 0, 0, 0]).full_mul(U256([0, 0, 0, MAX])); assert_eq!(U512([0, 0, 0, MAX, 0, 0, 0, 0]), result); diff --git a/ethereum-types/tests/serde.rs b/ethereum-types/tests/serde.rs index 1cdbc4466..4a92241cf 100644 --- a/ethereum-types/tests/serde.rs +++ b/ethereum-types/tests/serde.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use ethereum_types::{U256, U512, H160, H256}; +use ethereum_types::{H160, H256, U256, U512}; use serde_json as ser; macro_rules! test { @@ -39,7 +39,7 @@ macro_rules! test { assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test!(U256, test_u256); @@ -51,9 +51,9 @@ fn test_large_values() { ser::to_string_pretty(&!U256::zero()).unwrap(), "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"").unwrap_err().is_data() - ); + assert!(ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") + .unwrap_err() + .is_data()); } #[test] @@ -94,9 +94,15 @@ fn test_h256() { #[test] fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + 
assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"") + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"") + .unwrap_err() + .is_data()); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md new file mode 100644 index 000000000..2d2170b42 --- /dev/null +++ b/fixed-hash/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.5.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 393b4cc93..25098b5ad 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -338,7 +338,7 @@ macro_rules! construct_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_byteorder_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled byteorder crate support. @@ -368,7 +368,7 @@ macro_rules! impl_byteorder_for_fixed_hash { fn to_low_u64_with_byteorder(&self) -> u64 where - B: $crate::byteorder::ByteOrder + B: $crate::byteorder::ByteOrder, { let mut buf = [0x0; 8]; let capped = $crate::core_::cmp::min(Self::len_bytes(), 8); @@ -411,7 +411,7 @@ macro_rules! impl_byteorder_for_fixed_hash { fn from_low_u64_with_byteorder(val: u64) -> Self where - B: $crate::byteorder::ByteOrder + B: $crate::byteorder::ByteOrder, { let mut buf = [0x0; 8]; B::write_u64(&mut buf, val); @@ -457,7 +457,7 @@ macro_rules! 
impl_byteorder_for_fixed_hash { Self::from_low_u64_with_byteorder::<$crate::byteorder::NativeEndian>(val) } } - } + }; } // Implementation for disabled rand crate support. @@ -471,7 +471,7 @@ macro_rules! impl_byteorder_for_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_rand_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled rand crate support. @@ -486,9 +486,7 @@ macro_rules! impl_rand_for_fixed_hash { #[doc(hidden)] macro_rules! impl_rand_for_fixed_hash { ( $name:ident ) => { - impl $crate::rand::distributions::Distribution<$name> - for $crate::rand::distributions::Standard - { + impl $crate::rand::distributions::Distribution<$name> for $crate::rand::distributions::Standard { fn sample(&self, rng: &mut R) -> $name { let mut ret = $name::zero(); for byte in ret.as_bytes_mut().iter_mut() { @@ -504,7 +502,7 @@ macro_rules! impl_rand_for_fixed_hash { /// given random number generator. pub fn randomize_using(&mut self, rng: &mut R) where - R: $crate::rand::Rng + ?Sized + R: $crate::rand::Rng + ?Sized, { use $crate::rand::distributions::Distribution; *self = $crate::rand::distributions::Standard.sample(rng); @@ -520,7 +518,7 @@ macro_rules! impl_rand_for_fixed_hash { /// given random number generator. pub fn random_using(rng: &mut R) -> Self where - R: $crate::rand::Rng + ?Sized + R: $crate::rand::Rng + ?Sized, { let mut ret = Self::zero(); ret.randomize_using(rng); @@ -534,7 +532,7 @@ macro_rules! impl_rand_for_fixed_hash { hash } } - } + }; } // Implementation for disabled libc crate support. @@ -562,7 +560,7 @@ macro_rules! impl_libc_for_fixed_hash { self.as_bytes().cmp(other.as_bytes()) } } - } + }; } // Implementation for enabled libc crate support. @@ -608,7 +606,7 @@ macro_rules! impl_libc_for_fixed_hash { $crate::core_::cmp::Ordering::Equal } } - } + }; } // Implementation for disabled rustc-hex crate support. @@ -622,7 +620,7 @@ macro_rules! 
impl_libc_for_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_rustc_hex_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled rustc-hex crate support. @@ -650,9 +648,7 @@ macro_rules! impl_rustc_hex_for_fixed_hash { /// /// - When encountering invalid non hex-digits /// - Upon empty string input or invalid input length in general - fn from_str( - input: &str, - ) -> $crate::core_::result::Result<$name, $crate::rustc_hex::FromHexError> { + fn from_str(input: &str) -> $crate::core_::result::Result<$name, $crate::rustc_hex::FromHexError> { #[cfg(not(feature = "std"))] use $crate::alloc_::vec::Vec; use $crate::rustc_hex::FromHex; @@ -663,7 +659,7 @@ macro_rules! impl_rustc_hex_for_fixed_hash { Ok($name::from_slice(&bytes)) } } - } + }; } // Implementation for disabled quickcheck crate support. @@ -677,7 +673,7 @@ macro_rules! impl_rustc_hex_for_fixed_hash { #[macro_export] #[doc(hidden)] macro_rules! impl_quickcheck_for_fixed_hash { - ( $name:ident ) => {} + ( $name:ident ) => {}; } // Implementation for enabled quickcheck crate support. @@ -699,7 +695,7 @@ macro_rules! impl_quickcheck_for_fixed_hash { Self::from(res) } } - } + }; } #[macro_export] @@ -788,14 +784,11 @@ macro_rules! impl_fixed_hash_conversions { let small_ty_size = $small_ty::len_bytes(); $crate::core_::debug_assert!( - large_ty_size > small_ty_size - && large_ty_size % 2 == 0 - && small_ty_size % 2 == 0 + large_ty_size > small_ty_size && large_ty_size % 2 == 0 && small_ty_size % 2 == 0 ); let mut ret = $large_ty::zero(); - ret.as_bytes_mut()[(large_ty_size - small_ty_size)..large_ty_size] - .copy_from_slice(value.as_bytes()); + ret.as_bytes_mut()[(large_ty_size - small_ty_size)..large_ty_size].copy_from_slice(value.as_bytes()); ret } } @@ -806,15 +799,11 @@ macro_rules! 
impl_fixed_hash_conversions { let small_ty_size = $small_ty::len_bytes(); $crate::core_::debug_assert!( - large_ty_size > small_ty_size - && large_ty_size % 2 == 0 - && small_ty_size % 2 == 0 + large_ty_size > small_ty_size && large_ty_size % 2 == 0 && small_ty_size % 2 == 0 ); let mut ret = $small_ty::zero(); - ret.as_bytes_mut().copy_from_slice( - &value[(large_ty_size - small_ty_size)..large_ty_size], - ); + ret.as_bytes_mut().copy_from_slice(&value[(large_ty_size - small_ty_size)..large_ty_size]); ret } } diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 9c841f885..297490134 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -65,7 +65,7 @@ mod hash; mod tests; #[cfg(feature = "api-dummy")] -construct_fixed_hash!{ - /// Go here for an overview of the hash type API. - pub struct ApiDummy(32); +construct_fixed_hash! { + /// Go here for an overview of the hash type API. + pub struct ApiDummy(32); } diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index b1445f3e4..c587281b4 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -1,8 +1,8 @@ -construct_fixed_hash!{ struct H32(4); } -construct_fixed_hash!{ struct H64(8); } -construct_fixed_hash!{ struct H128(16); } -construct_fixed_hash!{ struct H160(20); } -construct_fixed_hash!{ struct H256(32); } +construct_fixed_hash! { struct H32(4); } +construct_fixed_hash! { struct H64(8); } +construct_fixed_hash! { struct H128(16); } +construct_fixed_hash! { struct H160(20); } +construct_fixed_hash! 
{ struct H256(32); } impl_fixed_hash_conversions!(H256, H160); @@ -153,26 +153,14 @@ mod to_low_u64 { #[test] fn smaller_size() { - assert_eq!( - H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_be(), - 0x0123_4567 - ); - assert_eq!( - H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_le(), - 0x6745_2301_0000_0000 - ); + assert_eq!(H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_be(), 0x0123_4567); + assert_eq!(H32::from([0x01, 0x23, 0x45, 0x67]).to_low_u64_le(), 0x6745_2301_0000_0000); } #[test] fn equal_size() { - assert_eq!( - H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_le(), - 0xEFCD_AB89_6745_2301 - ); - assert_eq!( - H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_be(), - 0x0123_4567_89AB_CDEF - ) + assert_eq!(H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_le(), 0xEFCD_AB89_6745_2301); + assert_eq!(H64::from([0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]).to_low_u64_be(), 0x0123_4567_89AB_CDEF) } #[test] @@ -205,14 +193,8 @@ mod from_low_u64 { #[test] fn smaller_size() { - assert_eq!( - H32::from_low_u64_be(0x0123_4567_89AB_CDEF), - H32::from([0x01, 0x23, 0x45, 0x67]) - ); - assert_eq!( - H32::from_low_u64_le(0x0123_4567_89AB_CDEF), - H32::from([0xEF, 0xCD, 0xAB, 0x89]) - ); + assert_eq!(H32::from_low_u64_be(0x0123_4567_89AB_CDEF), H32::from([0x01, 0x23, 0x45, 0x67])); + assert_eq!(H32::from_low_u64_le(0x0123_4567_89AB_CDEF), H32::from([0xEF, 0xCD, 0xAB, 0x89])); } #[test] @@ -250,16 +232,13 @@ mod from_low_u64 { #[cfg(feature = "rand")] mod rand { use super::*; - use ::rand::{SeedableRng, rngs::StdRng}; + use ::rand::{rngs::StdRng, SeedableRng}; #[test] fn random() { let default_seed = ::Seed::default(); let mut rng = StdRng::from_seed(default_seed); - assert_eq!( - H32::random_using(&mut rng), - H32::from([0x76, 0xa0, 0x40, 0x53]) - ); + assert_eq!(H32::random_using(&mut rng), H32::from([0x76, 0xa0, 0x40, 0x53])); } #[test] @@ -313,14 +292,13 @@ mod from_str { #[test] fn 
from_h160_to_h256() { let h160 = H160::from([ - 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, - 0x46, 0xB3, 0x7D, 0x11, 0x06, + 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, + 0x11, 0x06, ]); let h256 = H256::from(h160); let expected = H256::from([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, - 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, - 0x11, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, + 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, ]); assert_eq!(h256, expected); } diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md new file mode 100644 index 000000000..c1208e434 --- /dev/null +++ b/keccak-hash/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.4.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/keccak-hash/benches/keccak_256.rs b/keccak-hash/benches/keccak_256.rs index e18de9fe6..5cf5f2526 100644 --- a/keccak-hash/benches/keccak_256.rs +++ b/keccak-hash/benches/keccak_256.rs @@ -23,28 +23,28 @@ use test::Bencher; #[bench] fn bench_keccak_256_with_empty_input(b: &mut Bencher) { - let empty = [0u8;0]; - b.bytes = empty.len() as u64; - b.iter(|| { - let _out = keccak(empty); - }) + let empty = [0u8; 0]; + b.bytes = empty.len() as u64; + b.iter(|| { + let _out = keccak(empty); + }) } #[bench] fn bench_keccak_256_with_typical_input(b: &mut Bencher) { - let data: Vec = From::from("some medum length string with important information"); - b.bytes = data.len() as u64; - b.iter(|| { - let _out = keccak(&data); - }) + let data: Vec = From::from("some medum length string with important information"); + b.bytes = data.len() as u64; + b.iter(|| { + let _out = keccak(&data); + }) } #[bench] fn bench_keccak_256_with_large_input(b: &mut Bencher) { - // 4096 chars - let data: Vec = 
From::from("IGxcKBr1Qp7tuqtpSVhAbvt7UgWLEi7mCA6Wa185seLSIJLFS8K1aAFO9AwtO9b3n9SM3Qg136JMmy9Mj9gZ84IaUm8XioPtloabFDU5ZR1wvauJT6jNTkvBVBpUigIsyU7C1u3s99vKP64LpXqvo1hwItZKtISxmUAgzzjv5q14V4G9bkKAnmc4M5xixgLsDGZmnj6HcOMY3XRkWtxN3RscSKwPA0bfpgtz27ZVHplbXwloYRgRLpjRhZJc7sqO8RFnTHKasVkxVRcUoDBvWNJK27TbLvQQcfxETI2Q1H6c2cBAchi8unSiuxqy5rIvVxcl9rsmmRY4IXLEG9qKntUGbiIRLjEffIP9ODoWog0GbWLmMtfvtf24hWVwXz6Ap5oUAR0kLgb7HYIYrOwKjvfV25iEF7GW8cjhl8yowXx1zcgW4t6NJNqJlGzRKx8MvRWQXvHz8h8JxcHl7S64i6PAkxI9eCLXLvs8cpbEQQHt05Zu6GKm6IInjc9mSh52WFuGhgjbno69XzfkBufJs6c9tZuBf6ErVPj4UxmT82ajCruDusk79Tlvb8oQMLjoplQc1alQaLQwSsMac9iVp9MiE3PeYnTTepJ1V10tp79fciDAnNPJgPcRfDYv0REcSFgR9Q7yWhbpPpyBjO7HwOykDQVGtV0ZbDFrFRygLAXagAIkOPc9HDfcBNID1Q2MGk8ijVWMyvmGz1wzbpNfFcQaSOm8olhwoLyHUGvkyXegh44iNsPBUvSicNxTTDowtMqO5azleuWEjzxCobYbASDopvl6JeJjRtEBBO5YCQJiHsYjlXh9QR5Q543GsqhzRLgcHNRSZYLMZqDmIABXZi8VRNJMZyWXDRKHOGDmcHWe55uZomW6FnyU0uSRKxxz66K0JWfxuFzzxAR0vR4ZZCTemgDRQuDwL1loC3KUMjDpU13jUgoPc4UJUVfwQ4f4BUY3X51Cfw9FLw4oX39KoFoiCP2Z6z27gZUY1IlE59WoXGLj4KjTp4C16ZihG080gfDIWlXnDEk3VwBuBFyKWARB63sGLrGnn27b1gHWMaop6sPvkQgWxkEKIqsxDIvXLZJg2s23V8Gqtt0FeA7R3RCvBysF4jNjQ7NiQTIQWQZ8G9gO4mEsftolSZv6FlSpNeBKIIwYWSO2R6vkgeiz06euE9bwwnenOjwPNGTGk8WHIOZBJ1hIP0ejVU2i2ca9ON0phSAnewqjo5W3PtZf2Q7mDvp9imuVWoy4t8XcZq8I2Un9jVjes9Xi0FLN2t71vLFWLWZmGDzwXxpqEgkARS1WjtJoYXCBmRnXEPj6jQfwMZWKPYSIrmOogxMVoWvA8wrof6utfJna9JezyTnrBJSCuGTSNmwwAXRLoFYxF1RITyN8mI2KmHSfvLXBrbE6kmAkjsm4XJb6kria7oUQQ1gzJuCyB7oNHjZTBFNhNa7VeQ1s1xLOwZXLOAjZ4MDTYKnF7giGJGyswb5KQxkOV9orbuAu6pJsjtql6h1UD3BcNUkG3oz8kJNepbuCN3vNCJcZOX1VrQi0PWkDwyvECrQ2E1CgbU6GpWatpg2sCTpo9W62pCcWBK2FKUFWqU3qo2T7T1Mk2ZtM6hE9I8op0M7xlGE91Mn7ea6aq93MWp7nvFlBvbaMIoeU4MpDx0BeOSkROY03ZBJ0x7K8nJrNUhAtvxp17c9oFk0VxLiuRbAAcwDUormOmpVXZNIcqnap4twEVYaSIowfcNojyUSrFL5nPc8ZG93WgNNl9rpUPZhssVml3DvXghI80A9SW3QauzohTQAX2bkWelFBHnuG2LKrsJ8en51N6CkjcS5b87y1DVMZELcZ1n5s8PCAA1wyn7OSZlgw00GRzch1YwMoHzBBgIUtMO9HrMyuhgqIPJP7KcKbQkKhtvBXKplX8SCfSlOwUkLwHNKm3HYVE0uVfJ91NAsUrGoCOjYiXYpoRT8bjAPWTm6fDlTq2sbPOyTMoc4x
RasmiOJ7B0PT6UxPzCPImM4100sPFxp7Kofv4okKZWTPKTefeYiPefI3jRgfDtEIP9E6a35LZD75lBNMXYlAqL3qlnheUQD1WQimFTHiDsW6bmURptNvtkMjEXzXzpWbnyxBskUGTvP2YQjtSAhWliDXkv6t1x71cYav7TQbqvbIzMRQQsguSGYMbs8YIC4DC9ep5reWAfanlTxcxksbEhQ7FGzXOvcufeGnDl2C85gWfryVzwN7kOZiSEktFMOQ1ngRC23y1fCOiHQVQJ2nLnaW7GILb9wkN1mBTRuHsOefRJST0TnRxcn4bBq4MIibIitVyjPRy7G5XvPEcL4pFaW1HCPGm6pUOEEwTer32JObNGCyTFB1BI2cRLJu5BHPjgG3mmb0gGkGlIfh8D2b2amogpivqEn2r9Y1KOKQ8ufJvG2mYfkevco9DuEZ9Nmzkm6XkCTZaFMNHqbfQaKqsEYK7i2N1KfkBct1leW2H9MQ9QO7AHCqXHK47b1kWVIm6pSJA1yV4funzCqXnIJCEURQgHiKf38YpN7ylLhe1J4UvSG3KeesZNeFFIZOEP9HZUSFMpnN1MOrwejojK0D4qzwucYWtXrTQ8I7UP5QhlijIsCKckUa9C1Osjrq8cgSclYNGt19wpy0onUbX1rOQBUlAAUJs4CyXNU0wmVUjw7tG1LUC8my4s9KZDUj4R5UcPz3VaZRrx1RqYu6YxjroJW70I1LyG4WEiQbOkCoLmaiWo9WzbUS2cErlOo2RPymlkWHxbNnZawX2Bc872ivRHSWqNpRHyuR5QewXmcyghH3EhESBAxTel5E2xuQXfLCEVK0kEk0Mj22KPsckKKyH7sVYC1F4YItQh5hj9Titb7KflQb9vnXQ44UHxY3zBhTQT5PSYv1Kv8HxXCsnpmhZCiBru16iX9oEB33icBVB2KKcZZEEKnCGPVxJlM9RTlyNyQmjHf7z4GeTDuMAUrsMO31WvgZBnWcAOtn6ulBTUCAaqxJiWqzlMx2FSANAlyAjAxqzmQjzPLvQRjskUnBFN3woKB1m2bSo2c5thwA1fKiPvN5LW8tl1rnfNy3rJ0GJpK8nZjkzHMztYrKYAe56pX4SvplpTyibTIiRXLyEVsmuByTHCZhO3fvGoFsav3ZuRhe9eAAWeqAh13eKDTcA0ufME3ZnmJheXEZ3OwrxnFjSf3U0clkWYVont3neh77ODKHhYnX0bOmnJJlr4RqFoLBitskY0kcGMKcZlaej21SENjDcFgaka3CfHbAH5vIFqnoX1JZrZPkQ65PZqQWImP79U3gXWKvz96lElyJZAFqn0Mbltllqw4MhlI766AvHraOmMsJoNvjv1QR7pCSnC0iX6nbqW1eVPaUSZDuZRtRIxfLA8HC9VbxufT2KZV3qG0l7wrZna5Di2MNcBE9uthuVLZcqp8vCmEhINDhRRlipR7tC2iRBHecS5WtxBCpbEm1y1kgNG5o60UKgAswxxuJ3RQ9Y49mPIApBMmp4LFpuKRfcrZb4UJnCfR3pNbQ70nnZ6Be2M7tuJUCoFfHrhqHXNz5A0uWMgxUS50c60zLl6QAELxHaCGba4WCMOHIo5nSKcUuYtDyDoDlrezALW5mZR4PRPRxnjrXxbJI14qrpymRReC3QgFDJp6sT5TLwvSHaavPlEbt2Eu0Kh5SXklGHXP9YuF3glGuJzSob3NakW1RXF5786U1MHhtJby64LyGWvNn4QXie3VjeL3QQu4C9crEAxSSiOJOfnL3DYIVOY4ipUkKFlF7Rp2q6gZazDvcUCp1cbcr7T7B4s22rXzjN7mHYWOyWuZGwlImeorY3aVKi7BaXbhgOFw6BUmIc1HeGFELHIEnPE9MwOjZam3LOm0rhBHlvJJZkXvJKmDUJrGlyqC5GtC5lDWLfXewyDWDqq7PY0atVQily5GWqib6wub6u6LZ3HZDNP8gK64Nf4kC259AE4V2hCohDnSsXAIoOkehwXyp6CkD
T42NJb6sXHUv2N6cm292MiKA22PKWrwUGsan599KI2V67YRDfcfiB4ZHRDiSe62MBE0fGLIgXLIWw1xTWYbPQ9YAj3xovBvmewbJ1De4k6uS"); - b.bytes = data.len() as u64; - b.iter(|| { - let _out = keccak(&data); - }) + // 4096 chars + let data: Vec = From::from("IGxcKBr1Qp7tuqtpSVhAbvt7UgWLEi7mCA6Wa185seLSIJLFS8K1aAFO9AwtO9b3n9SM3Qg136JMmy9Mj9gZ84IaUm8XioPtloabFDU5ZR1wvauJT6jNTkvBVBpUigIsyU7C1u3s99vKP64LpXqvo1hwItZKtISxmUAgzzjv5q14V4G9bkKAnmc4M5xixgLsDGZmnj6HcOMY3XRkWtxN3RscSKwPA0bfpgtz27ZVHplbXwloYRgRLpjRhZJc7sqO8RFnTHKasVkxVRcUoDBvWNJK27TbLvQQcfxETI2Q1H6c2cBAchi8unSiuxqy5rIvVxcl9rsmmRY4IXLEG9qKntUGbiIRLjEffIP9ODoWog0GbWLmMtfvtf24hWVwXz6Ap5oUAR0kLgb7HYIYrOwKjvfV25iEF7GW8cjhl8yowXx1zcgW4t6NJNqJlGzRKx8MvRWQXvHz8h8JxcHl7S64i6PAkxI9eCLXLvs8cpbEQQHt05Zu6GKm6IInjc9mSh52WFuGhgjbno69XzfkBufJs6c9tZuBf6ErVPj4UxmT82ajCruDusk79Tlvb8oQMLjoplQc1alQaLQwSsMac9iVp9MiE3PeYnTTepJ1V10tp79fciDAnNPJgPcRfDYv0REcSFgR9Q7yWhbpPpyBjO7HwOykDQVGtV0ZbDFrFRygLAXagAIkOPc9HDfcBNID1Q2MGk8ijVWMyvmGz1wzbpNfFcQaSOm8olhwoLyHUGvkyXegh44iNsPBUvSicNxTTDowtMqO5azleuWEjzxCobYbASDopvl6JeJjRtEBBO5YCQJiHsYjlXh9QR5Q543GsqhzRLgcHNRSZYLMZqDmIABXZi8VRNJMZyWXDRKHOGDmcHWe55uZomW6FnyU0uSRKxxz66K0JWfxuFzzxAR0vR4ZZCTemgDRQuDwL1loC3KUMjDpU13jUgoPc4UJUVfwQ4f4BUY3X51Cfw9FLw4oX39KoFoiCP2Z6z27gZUY1IlE59WoXGLj4KjTp4C16ZihG080gfDIWlXnDEk3VwBuBFyKWARB63sGLrGnn27b1gHWMaop6sPvkQgWxkEKIqsxDIvXLZJg2s23V8Gqtt0FeA7R3RCvBysF4jNjQ7NiQTIQWQZ8G9gO4mEsftolSZv6FlSpNeBKIIwYWSO2R6vkgeiz06euE9bwwnenOjwPNGTGk8WHIOZBJ1hIP0ejVU2i2ca9ON0phSAnewqjo5W3PtZf2Q7mDvp9imuVWoy4t8XcZq8I2Un9jVjes9Xi0FLN2t71vLFWLWZmGDzwXxpqEgkARS1WjtJoYXCBmRnXEPj6jQfwMZWKPYSIrmOogxMVoWvA8wrof6utfJna9JezyTnrBJSCuGTSNmwwAXRLoFYxF1RITyN8mI2KmHSfvLXBrbE6kmAkjsm4XJb6kria7oUQQ1gzJuCyB7oNHjZTBFNhNa7VeQ1s1xLOwZXLOAjZ4MDTYKnF7giGJGyswb5KQxkOV9orbuAu6pJsjtql6h1UD3BcNUkG3oz8kJNepbuCN3vNCJcZOX1VrQi0PWkDwyvECrQ2E1CgbU6GpWatpg2sCTpo9W62pCcWBK2FKUFWqU3qo2T7T1Mk2ZtM6hE9I8op0M7xlGE91Mn7ea6aq93MWp7nvFlBvbaMIoeU4MpDx0BeOSkROY03ZBJ0x7K8nJrNUhAtvxp17c9oFk0VxLiuRbAAcwDUormOmpVXZNIcqnap4twEVYaSIowfcNojyUSrFL5nPc8ZG93WgNNl
9rpUPZhssVml3DvXghI80A9SW3QauzohTQAX2bkWelFBHnuG2LKrsJ8en51N6CkjcS5b87y1DVMZELcZ1n5s8PCAA1wyn7OSZlgw00GRzch1YwMoHzBBgIUtMO9HrMyuhgqIPJP7KcKbQkKhtvBXKplX8SCfSlOwUkLwHNKm3HYVE0uVfJ91NAsUrGoCOjYiXYpoRT8bjAPWTm6fDlTq2sbPOyTMoc4xRasmiOJ7B0PT6UxPzCPImM4100sPFxp7Kofv4okKZWTPKTefeYiPefI3jRgfDtEIP9E6a35LZD75lBNMXYlAqL3qlnheUQD1WQimFTHiDsW6bmURptNvtkMjEXzXzpWbnyxBskUGTvP2YQjtSAhWliDXkv6t1x71cYav7TQbqvbIzMRQQsguSGYMbs8YIC4DC9ep5reWAfanlTxcxksbEhQ7FGzXOvcufeGnDl2C85gWfryVzwN7kOZiSEktFMOQ1ngRC23y1fCOiHQVQJ2nLnaW7GILb9wkN1mBTRuHsOefRJST0TnRxcn4bBq4MIibIitVyjPRy7G5XvPEcL4pFaW1HCPGm6pUOEEwTer32JObNGCyTFB1BI2cRLJu5BHPjgG3mmb0gGkGlIfh8D2b2amogpivqEn2r9Y1KOKQ8ufJvG2mYfkevco9DuEZ9Nmzkm6XkCTZaFMNHqbfQaKqsEYK7i2N1KfkBct1leW2H9MQ9QO7AHCqXHK47b1kWVIm6pSJA1yV4funzCqXnIJCEURQgHiKf38YpN7ylLhe1J4UvSG3KeesZNeFFIZOEP9HZUSFMpnN1MOrwejojK0D4qzwucYWtXrTQ8I7UP5QhlijIsCKckUa9C1Osjrq8cgSclYNGt19wpy0onUbX1rOQBUlAAUJs4CyXNU0wmVUjw7tG1LUC8my4s9KZDUj4R5UcPz3VaZRrx1RqYu6YxjroJW70I1LyG4WEiQbOkCoLmaiWo9WzbUS2cErlOo2RPymlkWHxbNnZawX2Bc872ivRHSWqNpRHyuR5QewXmcyghH3EhESBAxTel5E2xuQXfLCEVK0kEk0Mj22KPsckKKyH7sVYC1F4YItQh5hj9Titb7KflQb9vnXQ44UHxY3zBhTQT5PSYv1Kv8HxXCsnpmhZCiBru16iX9oEB33icBVB2KKcZZEEKnCGPVxJlM9RTlyNyQmjHf7z4GeTDuMAUrsMO31WvgZBnWcAOtn6ulBTUCAaqxJiWqzlMx2FSANAlyAjAxqzmQjzPLvQRjskUnBFN3woKB1m2bSo2c5thwA1fKiPvN5LW8tl1rnfNy3rJ0GJpK8nZjkzHMztYrKYAe56pX4SvplpTyibTIiRXLyEVsmuByTHCZhO3fvGoFsav3ZuRhe9eAAWeqAh13eKDTcA0ufME3ZnmJheXEZ3OwrxnFjSf3U0clkWYVont3neh77ODKHhYnX0bOmnJJlr4RqFoLBitskY0kcGMKcZlaej21SENjDcFgaka3CfHbAH5vIFqnoX1JZrZPkQ65PZqQWImP79U3gXWKvz96lElyJZAFqn0Mbltllqw4MhlI766AvHraOmMsJoNvjv1QR7pCSnC0iX6nbqW1eVPaUSZDuZRtRIxfLA8HC9VbxufT2KZV3qG0l7wrZna5Di2MNcBE9uthuVLZcqp8vCmEhINDhRRlipR7tC2iRBHecS5WtxBCpbEm1y1kgNG5o60UKgAswxxuJ3RQ9Y49mPIApBMmp4LFpuKRfcrZb4UJnCfR3pNbQ70nnZ6Be2M7tuJUCoFfHrhqHXNz5A0uWMgxUS50c60zLl6QAELxHaCGba4WCMOHIo5nSKcUuYtDyDoDlrezALW5mZR4PRPRxnjrXxbJI14qrpymRReC3QgFDJp6sT5TLwvSHaavPlEbt2Eu0Kh5SXklGHXP9YuF3glGuJzSob3NakW1RXF5786U1MHhtJby64LyGWvNn4QXie3VjeL3QQu4C9crEAxSSiOJOfnL3DYIVOY4i
pUkKFlF7Rp2q6gZazDvcUCp1cbcr7T7B4s22rXzjN7mHYWOyWuZGwlImeorY3aVKi7BaXbhgOFw6BUmIc1HeGFELHIEnPE9MwOjZam3LOm0rhBHlvJJZkXvJKmDUJrGlyqC5GtC5lDWLfXewyDWDqq7PY0atVQily5GWqib6wub6u6LZ3HZDNP8gK64Nf4kC259AE4V2hCohDnSsXAIoOkehwXyp6CkDT42NJb6sXHUv2N6cm292MiKA22PKWrwUGsan599KI2V67YRDfcfiB4ZHRDiSe62MBE0fGLIgXLIWw1xTWYbPQ9YAj3xovBvmewbJ1De4k6uS"); + b.bytes = data.len() as u64; + b.iter(|| { + let _out = keccak(&data); + }) } diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index 77a3767e0..4f19937ae 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -16,21 +16,30 @@ #![cfg_attr(not(feature = "std"), no_std)] +use core::slice; #[cfg(feature = "std")] use std::io; -use core::slice; pub use primitive_types::H256; use tiny_keccak::Keccak; /// Get the KECCAK (i.e. Keccak) hash of the empty bytes string. -pub const KECCAK_EMPTY: H256 = H256( [0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70] ); +pub const KECCAK_EMPTY: H256 = H256([ + 0xc5, 0xd2, 0x46, 0x01, 0x86, 0xf7, 0x23, 0x3c, 0x92, 0x7e, 0x7d, 0xb2, 0xdc, 0xc7, 0x03, 0xc0, 0xe5, 0x00, 0xb6, + 0x53, 0xca, 0x82, 0x27, 0x3b, 0x7b, 0xfa, 0xd8, 0x04, 0x5d, 0x85, 0xa4, 0x70, +]); /// The KECCAK of the RLP encoding of empty data. -pub const KECCAK_NULL_RLP: H256 = H256( [0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21] ); +pub const KECCAK_NULL_RLP: H256 = H256([ + 0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 0x6e, 0x5b, 0x48, 0xe0, + 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 0xb4, 0x21, +]); /// The KECCAK of the RLP encoding of empty list. 
-pub const KECCAK_EMPTY_LIST_RLP: H256 = H256( [0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47] ); +pub const KECCAK_EMPTY_LIST_RLP: H256 = H256([ + 0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67, 0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, + 0x1b, 0x94, 0x8a, 0x74, 0x13, 0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47, +]); pub fn keccak>(s: T) -> H256 { let mut result = [0u8; 32]; @@ -41,26 +50,26 @@ pub fn keccak>(s: T) -> H256 { pub unsafe fn keccak_256_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) { // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This // means that we can reuse the input buffer for both input and output. - Keccak::keccak256( - slice::from_raw_parts(input, inputlen), - slice::from_raw_parts_mut(out, outlen) - ); + Keccak::keccak256(slice::from_raw_parts(input, inputlen), slice::from_raw_parts_mut(out, outlen)); } pub unsafe fn keccak_512_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) { // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This // means that we can reuse the input buffer for both input and output. 
- Keccak::keccak512( - slice::from_raw_parts(input, inputlen), - slice::from_raw_parts_mut(out, outlen) - ); + Keccak::keccak512(slice::from_raw_parts(input, inputlen), slice::from_raw_parts_mut(out, outlen)); } -pub fn keccak_256(input: &[u8], mut output: &mut [u8]) { Keccak::keccak256(input, &mut output); } +pub fn keccak_256(input: &[u8], mut output: &mut [u8]) { + Keccak::keccak256(input, &mut output); +} -pub fn keccak_512(input: &[u8], mut output: &mut [u8]) { Keccak::keccak512(input, &mut output); } +pub fn keccak_512(input: &[u8], mut output: &mut [u8]) { + Keccak::keccak512(input, &mut output); +} -pub fn write_keccak>(s: T, dest: &mut [u8]) { Keccak::keccak256(s.as_ref(), dest); } +pub fn write_keccak>(s: T, dest: &mut [u8]) { + Keccak::keccak256(s.as_ref(), dest); +} #[cfg(feature = "std")] pub fn keccak_pipe(r: &mut dyn io::BufRead, w: &mut dyn io::Write) -> Result { @@ -106,10 +115,8 @@ mod tests { assert_eq!( keccak([0x41u8; 32]), H256([ - 0x59, 0xca, 0xd5, 0x94, 0x86, 0x73, 0x62, 0x2c, - 0x1d, 0x64, 0xe2, 0x32, 0x24, 0x88, 0xbf, 0x01, - 0x61, 0x9f, 0x7f, 0xf4, 0x57, 0x89, 0x74, 0x1b, - 0x15, 0xa9, 0xf7, 0x82, 0xce, 0x92, 0x90, 0xa8 + 0x59, 0xca, 0xd5, 0x94, 0x86, 0x73, 0x62, 0x2c, 0x1d, 0x64, 0xe2, 0x32, 0x24, 0x88, 0xbf, 0x01, 0x61, + 0x9f, 0x7f, 0xf4, 0x57, 0x89, 0x74, 0x1b, 0x15, 0xa9, 0xf7, 0x82, 0xce, 0x92, 0x90, 0xa8 ]), ); } @@ -118,12 +125,10 @@ mod tests { fn write_keccak_with_content() { let data: Vec = From::from("hello world"); let expected = vec![ - 0x47, 0x17, 0x32, 0x85, 0xa8, 0xd7, 0x34, 0x1e, - 0x5e, 0x97, 0x2f, 0xc6, 0x77, 0x28, 0x63, 0x84, - 0xf8, 0x02, 0xf8, 0xef, 0x42, 0xa5, 0xec, 0x5f, - 0x03, 0xbb, 0xfa, 0x25, 0x4c, 0xb0, 0x1f, 0xad + 0x47, 0x17, 0x32, 0x85, 0xa8, 0xd7, 0x34, 0x1e, 0x5e, 0x97, 0x2f, 0xc6, 0x77, 0x28, 0x63, 0x84, 0xf8, 0x02, + 0xf8, 0xef, 0x42, 0xa5, 0xec, 0x5f, 0x03, 0xbb, 0xfa, 0x25, 0x4c, 0xb0, 0x1f, 0xad, ]; - let mut dest = [0u8;32]; + let mut dest = [0u8; 32]; write_keccak(data, &mut dest); 
assert_eq!(dest, expected.as_ref()); @@ -133,7 +138,7 @@ mod tests { #[test] fn should_keccak_a_file() { use std::fs; - use std::io::{Write, BufReader}; + use std::io::{BufReader, Write}; // given let tmpdir = tempdir::TempDir::new("keccak").unwrap(); diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md new file mode 100644 index 000000000..927c9dc9c --- /dev/null +++ b/kvdb-memorydb/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 2762b6081..8491129a0 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -14,9 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::{io, collections::{BTreeMap, HashMap}}; +use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; use parking_lot::RwLock; -use kvdb::{DBValue, DBTransaction, KeyValueDB, DBOp}; +use std::{ + collections::{BTreeMap, HashMap}, + io, +}; /// A key-value database fulfilling the `KeyValueDB` trait, living in memory. /// This is generally intended for tests and is not particularly optimized. 
@@ -35,9 +38,7 @@ pub fn create(num_cols: u32) -> InMemory { cols.insert(Some(idx), BTreeMap::new()); } - InMemory { - columns: RwLock::new(cols) - } + InMemory { columns: RwLock::new(cols) } } impl KeyValueDB for InMemory { @@ -53,10 +54,9 @@ impl KeyValueDB for InMemory { let columns = self.columns.read(); match columns.get(&col) { None => None, - Some(map) => - map.iter() - .find(|&(ref k ,_)| k.starts_with(prefix)) - .map(|(_, v)| v.to_vec().into_boxed_slice()) + Some(map) => { + map.iter().find(|&(ref k, _)| k.starts_with(prefix)).map(|(_, v)| v.to_vec().into_boxed_slice()) + } } } @@ -69,12 +69,12 @@ impl KeyValueDB for InMemory { if let Some(col) = columns.get_mut(&col) { col.insert(key.into_vec(), value); } - }, + } DBOp::Delete { col, key } => { if let Some(col) = columns.get_mut(&col) { col.remove(&*key); } - }, + } } } } @@ -83,26 +83,27 @@ impl KeyValueDB for InMemory { Ok(()) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { match self.columns.read().get(&col) { - Some(map) => Box::new( // TODO: worth optimizing at all? - map.clone() - .into_iter() - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())) + Some(map) => Box::new( + // TODO: worth optimizing at all? 
+ map.clone().into_iter().map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())), ), None => Box::new(None.into_iter()), } } - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { + fn iter_from_prefix<'a>( + &'a self, + col: Option, + prefix: &'a [u8], + ) -> Box, Box<[u8]>)> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( map.clone() .into_iter() .skip_while(move |&(ref k, _)| !k.starts_with(prefix)) - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())) + .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())), ), None => Box::new(None.into_iter()), } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md new file mode 100644 index 000000000..cc514b497 --- /dev/null +++ b/kvdb-rocksdb/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.6] - 2019-10-24 +- Updated to 2018 edition idioms (https://github.com/paritytech/parity-common/pull/237) +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 7f137953c..c4a9c0b15 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -14,33 +14,33 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::{ - cmp, fs, io, mem, result, error, - collections::HashMap, marker::PhantomData, path::Path -}; +use std::{cmp, collections::HashMap, error, fs, io, marker::PhantomData, mem, path::Path, result}; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use interleaved_ordered::{interleave_ordered, InterleaveOrdered}; use parity_rocksdb::{ - DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator, - Options, BlockBasedOptions, Direction, Cache, Column, ReadOptions + BlockBasedOptions, Cache, Column, DBIterator, Direction, IteratorMode, Options, ReadOptions, Writable, WriteBatch, + WriteOptions, DB, }; -use interleaved_ordered::{interleave_ordered, InterleaveOrdered}; +use parking_lot::{Mutex, MutexGuard, RwLock}; -use log::{debug, warn}; use elastic_array::ElasticArray32; use fs_swap::{swap, swap_nonatomic}; -use kvdb::{KeyValueDB, DBTransaction, DBValue, DBOp}; +use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; +use log::{debug, warn}; #[cfg(target_os = "linux")] use regex::Regex; #[cfg(target_os = "linux")] -use std::process::Command; -#[cfg(target_os = "linux")] use std::fs::File; #[cfg(target_os = "linux")] use std::path::PathBuf; +#[cfg(target_os = "linux")] +use std::process::Command; -fn other_io_err(e: E) -> io::Error where E: Into> { +fn other_io_err(e: E) -> io::Error +where + E: Into>, +{ io::Error::new(io::ErrorKind::Other, e) } @@ -78,10 +78,12 @@ pub fn rotational_from_df_output(df_out: Vec) -> Option { str::from_utf8(df_out.as_slice()) .ok() // Get the drive name. - .and_then(|df_str| Regex::new(r"/dev/(sd[:alpha:]{1,2})") - .ok() - .and_then(|re| re.captures(df_str)) - .and_then(|captures| captures.get(1))) + .and_then(|df_str| { + Regex::new(r"/dev/(sd[:alpha:]{1,2})") + .ok() + .and_then(|re| re.captures(df_str)) + .and_then(|captures| captures.get(1)) + }) // Generate path e.g. 
/sys/block/sda/queue/rotational .map(|drive_path| { let mut p = PathBuf::from("/sys/block"); @@ -110,9 +112,13 @@ impl CompactionProfile { let mut buffer = [0; 1]; if file.read_exact(&mut buffer).is_ok() { // 0 means not rotational. - if buffer == [48] { return Self::ssd(); } + if buffer == [48] { + return Self::ssd(); + } // 1 means rotational. - if buffer == [49] { return Self::hdd(); } + if buffer == [49] { + return Self::hdd(); + } } } } @@ -128,11 +134,7 @@ impl CompactionProfile { /// Default profile suitable for SSD storage pub fn ssd() -> CompactionProfile { - CompactionProfile { - initial_file_size: 64 * MB as u64, - block_size: 16 * KB, - write_rate_limit: None, - } + CompactionProfile { initial_file_size: 64 * MB as u64, block_size: 16 * KB, write_rate_limit: None } } /// Slow HDD compaction profile @@ -217,10 +219,11 @@ fn col_config(config: &DatabaseConfig, block_opts: &BlockBasedOptions) -> io::Re opts.set_block_based_table_factory(block_opts); - opts.set_parsed_options( - &format!("block_based_table_factory={{{};{}}}", - "cache_index_and_filter_blocks=true", - "pin_l0_filter_and_index_blocks_in_cache=true")).map_err(other_io_err)?; + opts.set_parsed_options(&format!( + "block_based_table_factory={{{};{}}}", + "cache_index_and_filter_blocks=true", "pin_l0_filter_and_index_blocks_in_cache=true" + )) + .map_err(other_io_err)?; opts.optimize_level_style_compaction(config.memory_budget_per_col() as i32); opts.set_target_file_size_base(config.compaction.initial_file_size); @@ -311,7 +314,7 @@ impl Database { let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect(); - for _ in 0 .. 
config.columns.unwrap_or(0) { + for _ in 0..config.columns.unwrap_or(0) { cf_options.push(col_config(&config, &block_opts)?); } @@ -324,27 +327,30 @@ impl Database { Some(_) => { match DB::open_cf(&opts, path, &cfnames, &cf_options) { Ok(db) => { - cfs = cfnames.iter().map(|n| db.cf_handle(n) - .expect("rocksdb opens a cf_handle for each cfname; qed")).collect(); + cfs = cfnames + .iter() + .map(|n| db.cf_handle(n).expect("rocksdb opens a cf_handle for each cfname; qed")) + .collect(); Ok(db) } Err(_) => { // retry and create CFs match DB::open_cf(&opts, path, &[], &[]) { Ok(mut db) => { - cfs = cfnames.iter() + cfs = cfnames + .iter() .enumerate() .map(|(i, n)| db.create_cf(n, &cf_options[i])) .collect::<::std::result::Result<_, _>>() .map_err(other_io_err)?; Ok(db) - }, + } err => err, } } } - }, - None => DB::open(&opts, path) + } + None => DB::open(&opts, path), }; let db = match db { @@ -357,19 +363,19 @@ impl Database { true => DB::open(&opts, path).map_err(other_io_err)?, false => { let db = DB::open_cf(&opts, path, &cfnames, &cf_options).map_err(other_io_err)?; - cfs = cfnames.iter().map(|n| db.cf_handle(n) - .expect("rocksdb opens a cf_handle for each cfname; qed")).collect(); + cfs = cfnames + .iter() + .map(|n| db.cf_handle(n).expect("rocksdb opens a cf_handle for each cfname; qed")) + .collect(); db - }, + } } - }, - Err(s) => { - return Err(other_io_err(s)) } + Err(s) => return Err(other_io_err(s)), }; let num_cols = cfs.len(); Ok(Database { - db: RwLock::new(Some(DBAndColumns{ db: db, cfs: cfs })), + db: RwLock::new(Some(DBAndColumns { db: db, cfs: cfs })), config: config.clone(), write_opts: write_opts, overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), @@ -399,13 +405,13 @@ impl Database { DBOp::Insert { col, key, value } => { let c = Self::to_overlay_column(col); overlay[c].insert(key, KeyState::Insert(value)); - }, + } DBOp::Delete { col, key } => { let c = Self::to_overlay_column(col); overlay[c].insert(key, 
KeyState::Delete); - }, + } } - }; + } } /// Commit buffered changes to database. Must be called under `flush_lock` @@ -424,30 +430,28 @@ impl Database { } else { batch.delete(key).map_err(other_io_err)?; } - }, + } KeyState::Insert(ref value) => { if c > 0 { batch.put_cf(cfs[c - 1], key, value).map_err(other_io_err)?; } else { batch.put(key, value).map_err(other_io_err)?; } - }, + } } } } } - check_for_corruption( - &self.path, - db.write_opt(batch, &self.write_opts))?; + check_for_corruption(&self.path, db.write_opt(batch, &self.write_opts))?; for column in self.flushing.write().iter_mut() { column.clear(); column.shrink_to_fit(); } Ok(()) - }, - None => Err(other_io_err("Database is closed")) + } + None => Err(other_io_err("Database is closed")), } } @@ -458,7 +462,7 @@ impl Database { // The value inside the lock is used to detect that. if *lock { // This can only happen if another flushing thread is terminated unexpectedly. - return Err(other_io_err("Database write failure. Running low on memory perhaps?")) + return Err(other_io_err("Database write failure. 
Running low on memory perhaps?")); } *lock = true; let result = self.write_flushing_with_lock(&mut lock); @@ -484,12 +488,12 @@ impl Database { DBOp::Delete { col, key } => match col { None => batch.delete(&key).map_err(other_io_err)?, Some(c) => batch.delete_cf(cfs[c as usize], &key).map_err(other_io_err)?, - } + }, } } check_for_corruption(&self.path, db.write_opt(batch, &self.write_opts)) - }, + } None => Err(other_io_err("Database is closed")), } } @@ -507,16 +511,19 @@ impl Database { match flushing.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), Some(&KeyState::Delete) => Ok(None), - None => { - col.map_or_else( + None => col + .map_or_else( || db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))), - |c| db.get_cf_opt(cfs[c as usize], key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v)))) - .map_err(other_io_err) - }, + |c| { + db.get_cf_opt(cfs[c as usize], key, &self.read_opts) + .map(|r| r.map(|v| DBValue::from_slice(&v))) + }, + ) + .map_err(other_io_err), } - }, + } } - }, + } None => Ok(None), } } @@ -527,8 +534,14 @@ impl Database { self.iter_from_prefix(col, prefix).and_then(|mut iter| { match iter.next() { // TODO: use prefix_same_as_start read option (not available in C API currently) - Some((k, v)) => if k[0 .. prefix.len()] == prefix[..] { Some(v) } else { None }, - _ => None + Some((k, v)) => { + if k[0..prefix.len()] == prefix[..] 
{ + Some(v) + } else { + None + } + } + _ => None, } }) } @@ -538,25 +551,27 @@ impl Database { match *self.db.read() { Some(DBAndColumns { ref db, ref cfs }) => { let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; - let mut overlay_data = overlay.iter() + let mut overlay_data = overlay + .iter() .filter_map(|(k, v)| match *v { - KeyState::Insert(ref value) => - Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_vec().into_boxed_slice())), + KeyState::Insert(ref value) => { + Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_vec().into_boxed_slice())) + } KeyState::Delete => None, - }).collect::>(); + }) + .collect::>(); overlay_data.sort(); let iter = col.map_or_else( || db.iterator_opt(IteratorMode::Start, &self.read_opts), - |c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts) - .expect("iterator params are valid; qed") + |c| { + db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts) + .expect("iterator params are valid; qed") + }, ); - Some(DatabaseIterator { - iter: interleave_ordered(overlay_data, iter), - _marker: PhantomData, - }) - }, + Some(DatabaseIterator { iter: interleave_ordered(overlay_data, iter), _marker: PhantomData }) + } None => None, } } @@ -564,15 +579,20 @@ impl Database { fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Option> { match *self.db.read() { Some(DBAndColumns { ref db, ref cfs }) => { - let iter = col.map_or_else(|| db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts), - |c| db.iterator_cf_opt(cfs[c as usize], IteratorMode::From(prefix, Direction::Forward), &self.read_opts) - .expect("iterator params are valid; qed")); - - Some(DatabaseIterator { - iter: interleave_ordered(Vec::new(), iter), - _marker: PhantomData, - }) - }, + let iter = col.map_or_else( + || db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts), + |c| { + db.iterator_cf_opt( + cfs[c as usize], + 
IteratorMode::From(prefix, Direction::Forward), + &self.read_opts, + ) + .expect("iterator params are valid; qed") + }, + ); + + Some(DatabaseIterator { iter: interleave_ordered(Vec::new(), iter), _marker: PhantomData }) + } None => None, } } @@ -593,17 +613,20 @@ impl Database { Ok(_) => { // ignore errors let _ = fs::remove_dir_all(new_db); - }, + } Err(err) => { debug!("DB atomic swap failed: {}", err); match swap_nonatomic(new_db, &self.path) { Ok(_) => { // ignore errors let _ = fs::remove_dir_all(new_db); - }, + } Err(err) => { warn!("Failed to swap DB directories: {:?}", err); - return Err(io::Error::new(io::ErrorKind::Other, "DB restoration failed: could not swap DB directories")); + return Err(io::Error::new( + io::ErrorKind::Other, + "DB restoration failed: could not swap DB directories", + )); } } } @@ -619,8 +642,10 @@ impl Database { /// The number of non-default column families. pub fn num_columns(&self) -> u32 { - self.db.read().as_ref() - .and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) } ) + self.db + .read() + .as_ref() + .and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) }) .map(|n| n as u32) .unwrap_or(0) } @@ -635,7 +660,7 @@ impl Database { db.drop_cf(&name).map_err(other_io_err)?; } Ok(()) - }, + } None => Ok(()), } } @@ -648,7 +673,7 @@ impl Database { let name = format!("col{}", col); cfs.push(db.create_cf(&name, &col_config(&self.config, &self.block_opts)?).map_err(other_io_err)?); Ok(()) - }, + } None => Ok(()), } } @@ -677,14 +702,16 @@ impl KeyValueDB for Database { Database::flush(self) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { let unboxed = Database::iter(self, col); Box::new(unboxed.into_iter().flat_map(|inner| inner)) } - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { + fn iter_from_prefix<'a>( + &'a self, + col: Option, + prefix: &'a [u8], + ) -> Box, 
Box<[u8]>)> + 'a> { let unboxed = Database::iter_from_prefix(self, col, prefix); Box::new(unboxed.into_iter().flat_map(|inner| inner)) } @@ -703,10 +730,10 @@ impl Drop for Database { #[cfg(test)] mod tests { + use super::*; + use ethereum_types::H256; use std::str::FromStr; use tempdir::TempDir; - use ethereum_types::H256; - use super::*; fn test_db(config: &DatabaseConfig) { let tempdir = TempDir::new("").unwrap(); @@ -773,7 +800,13 @@ mod tests { fn df_to_rotational() { use std::path::PathBuf; // Example df output. - let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10]; + let example_df = vec![ + 70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, + 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, + 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, + 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, + 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10, + ]; let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational")); assert_eq!(rotational_from_df_output(example_df), expected_output); } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md new file mode 100644 index 000000000..437f1ba3e --- /dev/null +++ b/kvdb-web/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb-web/src/error.rs b/kvdb-web/src/error.rs index f45295dc9..cd3916174 100644 --- a/kvdb-web/src/error.rs +++ b/kvdb-web/src/error.rs @@ -18,7 +18,6 @@ use std::fmt; - /// An error that occurred when working with IndexedDB. #[derive(Clone, PartialEq, Debug)] pub enum Error { @@ -48,11 +47,7 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Error::WindowNotAvailable => write!(f, "Accessing a Window has failed"), - Error::NotSupported(ref err) => write!( - f, - "IndexedDB is not supported by your browser: {}", - err, - ), + Error::NotSupported(ref err) => write!(f, "IndexedDB is not supported by your browser: {}", err,), Error::__Nonexhaustive => unreachable!(), } } diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs index 09664a267..99b06569e 100644 --- a/kvdb-web/src/indexed_db.rs +++ b/kvdb-web/src/indexed_db.rs @@ -16,24 +16,19 @@ //! Utility functions to interact with IndexedDB browser API. 
-use wasm_bindgen::{JsCast, JsValue, closure::Closure}; -use web_sys::{ - IdbDatabase, IdbRequest, IdbOpenDbRequest, - Event, IdbCursorWithValue, - IdbTransactionMode, -}; -use js_sys::{Array, Uint8Array, ArrayBuffer}; +use js_sys::{Array, ArrayBuffer, Uint8Array}; +use wasm_bindgen::{closure::Closure, JsCast, JsValue}; +use web_sys::{Event, IdbCursorWithValue, IdbDatabase, IdbOpenDbRequest, IdbRequest, IdbTransactionMode}; use futures::channel; use futures::prelude::*; use kvdb::{DBOp, DBTransaction}; -use std::ops::Deref; use log::{debug, warn}; +use std::ops::Deref; - -use crate::{Column, error::Error}; +use crate::{error::Error, Column}; pub struct IndexedDB { pub version: u32, @@ -58,8 +53,7 @@ pub fn open(name: &str, version: Option, columns: u32) -> impl Future idb_factory.open_with_u32(name, version) - .expect("TypeError is not possible with Rust; qed"), + Some(version) => idb_factory.open_with_u32(name, version).expect("TypeError is not possible with Rust; qed"), None => idb_factory.open(name).expect("TypeError is not possible with Rust; qed"), }; @@ -70,9 +64,7 @@ pub fn open(name: &str, version: Option, columns: u32) -> impl Future().expect("Event target is IdbRequest; qed"); - let result = req - .result() - .expect("IndexedDB.onsuccess should have a valid result; qed"); + let result = req.result().expect("IndexedDB.onsuccess should have a valid result; qed"); assert!(result.is_instance_of::()); let db = IdbDatabase::from(result); @@ -81,18 +73,12 @@ pub fn open(name: &str, version: Option, columns: u32) -> impl Future String { @@ -103,7 +89,6 @@ fn column_to_number(column: Column) -> u32 { column.map(|c| c + 1).unwrap_or_default() } - // Returns js objects representing store names for each column fn store_names_js(columns: u32) -> Array { let column_names = (0..=columns).map(store_name); @@ -141,23 +126,23 @@ fn try_create_missing_stores(req: &IdbOpenDbRequest, columns: u32, version: Opti } /// Commit a transaction to the IndexedDB. 
-pub fn idb_commit_transaction( - idb: &IdbDatabase, - txn: &DBTransaction, - columns: u32, -) -> impl Future { +pub fn idb_commit_transaction(idb: &IdbDatabase, txn: &DBTransaction, columns: u32) -> impl Future { let store_names_js = store_names_js(columns); // Create a transaction let mode = IdbTransactionMode::Readwrite; - let idb_txn = idb.transaction_with_str_sequence_and_mode(&store_names_js, mode) + let idb_txn = idb + .transaction_with_str_sequence_and_mode(&store_names_js, mode) .expect("The provided mode and store names are valid; qed"); // Open object stores (columns) - let object_stores = (0..=columns).map(|n| { - idb_txn.object_store(store_name(n).as_str()) - .expect("Object stores were created in try_create_object_stores; qed") - }).collect::>(); + let object_stores = (0..=columns) + .map(|n| { + idb_txn + .object_store(store_name(n).as_str()) + .expect("Object stores were created in try_create_object_stores; qed") + }) + .collect::>(); for op in &txn.ops { match op { @@ -173,7 +158,7 @@ pub fn idb_commit_transaction( if let Err(err) = res { warn!("error inserting key/values into col_{}: {:?}", column, err); } - }, + } DBOp::Delete { col, key } => { let column = column_to_number(*col) as usize; @@ -185,7 +170,7 @@ pub fn idb_commit_transaction( if let Err(err) = res { warn!("error deleting key from col_{}: {:?}", column, err); } - }, + } } } @@ -206,14 +191,12 @@ pub fn idb_commit_transaction( rx.map(|_| ()) } - /// Returns a cursor to a database column with the given column number. 
pub fn idb_cursor(idb: &IdbDatabase, col: u32) -> impl Stream, Vec)> { // TODO: we could read all the columns in one db transaction let store_name = store_name(col); let store_name = store_name.as_str(); - let txn = idb.transaction_with_str(store_name) - .expect("The stores were created on open: {}; qed"); + let txn = idb.transaction_with_str(store_name).expect("The stores were created on open: {}; qed"); let store = txn.object_store(store_name).expect("Opening a store shouldn't fail; qed"); let cursor = store.open_cursor().expect("Opening a cursor shouldn't fail; qed"); diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index f966f69ce..bc9687ed4 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -24,12 +24,12 @@ mod error; mod indexed_db; +use kvdb::{DBTransaction, DBValue}; +use kvdb_memorydb::{self as in_memory, InMemory}; +use send_wrapper::SendWrapper; use std::io; use std::rc::Rc; use std::sync::Mutex; -use kvdb::{DBValue, DBTransaction}; -use kvdb_memorydb::{InMemory, self as in_memory}; -use send_wrapper::SendWrapper; pub use error::Error; pub use kvdb::KeyValueDB; @@ -54,7 +54,6 @@ fn number_to_column(col: u32) -> Column { col.checked_sub(1) } - impl Database { /// Opens the database with the given name, /// and the specified number of columns (not including the default one). @@ -62,58 +61,67 @@ impl Database { // let's try to open the latest version of the db first let open_request = indexed_db::open(name.as_str(), None, columns); let name_clone = name.clone(); - open_request.then(move |db| { - let db = match db { - Ok(db) => db, - Err(err) => return future::Either::Right(future::err(err)), - }; - - // If we need more column than the latest version has, - // then bump the version (+ 1 for the default column). - // In order to bump the version, we close the database - // and reopen it with a higher version than it was opened with previously. - // cf. 
https://github.com/paritytech/parity-common/pull/202#discussion_r321221751 - if columns + 1 > db.columns { - let next_version = db.version + 1; - drop(db); - future::Either::Left(indexed_db::open(name.as_str(), Some(next_version), columns).boxed()) - } else { - future::Either::Left(future::ok(db).boxed()) - } - // populate the in_memory db from the IndexedDB - }).then(move |db| { - let db = match db { - Ok(db) => db, - Err(err) => return future::Either::Right(future::err(err)), - }; - - let indexed_db::IndexedDB { version, inner, .. } = db; - let rc = Rc::new(inner.take()); - let weak = Rc::downgrade(&rc); - // read the columns from the IndexedDB - future::Either::Left(stream::iter(0..=columns).map(move |n| { - let db = weak.upgrade().expect("rc should live at least as long; qed"); - indexed_db::idb_cursor(&db, n).fold(DBTransaction::new(), move |mut txn, (key, value)| { - let column = number_to_column(n); - txn.put_vec(column, key.as_ref(), value); - future::ready(txn) - }) - // write each column into memory - }).fold(in_memory::create(columns), |m, txn| { - txn.then(|txn| { - m.write_buffered(txn); - future::ready(m) - }) - }).then(move |in_memory| future::ok(Database { - name: name_clone, - version, - columns, - in_memory, - indexed_db: Mutex::new(SendWrapper::new( - Rc::try_unwrap(rc).expect("should have only 1 ref at this point; qed") - )), - }))) - }) + open_request + .then(move |db| { + let db = match db { + Ok(db) => db, + Err(err) => return future::Either::Right(future::err(err)), + }; + + // If we need more column than the latest version has, + // then bump the version (+ 1 for the default column). + // In order to bump the version, we close the database + // and reopen it with a higher version than it was opened with previously. + // cf. 
https://github.com/paritytech/parity-common/pull/202#discussion_r321221751 + if columns + 1 > db.columns { + let next_version = db.version + 1; + drop(db); + future::Either::Left(indexed_db::open(name.as_str(), Some(next_version), columns).boxed()) + } else { + future::Either::Left(future::ok(db).boxed()) + } + // populate the in_memory db from the IndexedDB + }) + .then(move |db| { + let db = match db { + Ok(db) => db, + Err(err) => return future::Either::Right(future::err(err)), + }; + + let indexed_db::IndexedDB { version, inner, .. } = db; + let rc = Rc::new(inner.take()); + let weak = Rc::downgrade(&rc); + // read the columns from the IndexedDB + future::Either::Left( + stream::iter(0..=columns) + .map(move |n| { + let db = weak.upgrade().expect("rc should live at least as long; qed"); + indexed_db::idb_cursor(&db, n).fold(DBTransaction::new(), move |mut txn, (key, value)| { + let column = number_to_column(n); + txn.put_vec(column, key.as_ref(), value); + future::ready(txn) + }) + // write each column into memory + }) + .fold(in_memory::create(columns), |m, txn| { + txn.then(|txn| { + m.write_buffered(txn); + future::ready(m) + }) + }) + .then(move |in_memory| { + future::ok(Database { + name: name_clone, + version, + columns, + in_memory, + indexed_db: Mutex::new(SendWrapper::new( + Rc::try_unwrap(rc).expect("should have only 1 ref at this point; qed"), + )), + }) + }), + ) + }) } /// Get the database name. 
@@ -156,14 +164,16 @@ impl KeyValueDB for Database { } // NOTE: clones the whole db - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { self.in_memory.iter(col) } // NOTE: clones the whole db - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a> - { + fn iter_from_prefix<'a>( + &'a self, + col: Option, + prefix: &'a [u8], + ) -> Box, Box<[u8]>)> + 'a> { self.in_memory.iter_from_prefix(col, prefix) } diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index 1286faf66..0824a5760 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -21,8 +21,8 @@ use futures::future::{self, FutureExt as _, TryFutureExt as _}; use kvdb_web::{Database, KeyValueDB as _}; -use wasm_bindgen_test::*; use wasm_bindgen::JsValue; +use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); @@ -31,36 +31,37 @@ fn reopen_the_database_with_more_columns() -> impl futures01::Future impl future::Future { - Database::open("MyAsyncTest".into(), col) - .unwrap_or_else(|err| panic!("{}", err)) + Database::open("MyAsyncTest".into(), col).unwrap_or_else(|err| panic!("{}", err)) } - let fut = open_db(1).then(|db| { - // Write a value into the database - let mut batch = db.transaction(); - batch.put(None, b"hello", b"world"); - db.write_buffered(batch); + let fut = open_db(1) + .then(|db| { + // Write a value into the database + let mut batch = db.transaction(); + batch.put(None, b"hello", b"world"); + db.write_buffered(batch); - assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); - // Check the database version - assert_eq!(db.version(), 1); + // Check the database version + assert_eq!(db.version(), 1); - // Close the database - drop(db); + // Close the database + drop(db); - // Reopen it again with 3 columns - open_db(3) - }).map(|db| 
{ - // The value should still be present - assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); - assert!(db.get(None, b"trash").unwrap().is_none()); + // Reopen it again with 3 columns + open_db(3) + }) + .map(|db| { + // The value should still be present + assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert!(db.get(None, b"trash").unwrap().is_none()); - // The version should be bumped - assert_eq!(db.version(), 2); + // The version should be bumped + assert_eq!(db.version(), 2); - Ok(()) - }); + Ok(()) + }); compat::Compat::new(fut) } diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md new file mode 100644 index 000000000..565fdccf3 --- /dev/null +++ b/kvdb/CHANGELOG.md @@ -0,0 +1,13 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +### Changed +- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/205) diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 93a4902fd..46c53e5f0 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -16,11 +16,11 @@ //! Key-Value store abstraction with `RocksDB` backend. +use bytes::Bytes; +use elastic_array::{ElasticArray128, ElasticArray32}; use std::io; use std::path::Path; use std::sync::Arc; -use elastic_array::{ElasticArray128, ElasticArray32}; -use bytes::Bytes; /// Required length of prefixes. pub const PREFIX_LEN: usize = 12; @@ -38,15 +38,8 @@ pub struct DBTransaction { /// Database operation. 
#[derive(Clone, PartialEq)] pub enum DBOp { - Insert { - col: Option, - key: ElasticArray32, - value: DBValue, - }, - Delete { - col: Option, - key: ElasticArray32, - } + Insert { col: Option, key: ElasticArray32, value: DBValue }, + Delete { col: Option, key: ElasticArray32 }, } impl DBOp { @@ -75,41 +68,28 @@ impl DBTransaction { /// Create new transaction with capacity. pub fn with_capacity(cap: usize) -> DBTransaction { - DBTransaction { - ops: Vec::with_capacity(cap) - } + DBTransaction { ops: Vec::with_capacity(cap) } } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. pub fn put(&mut self, col: Option, key: &[u8], value: &[u8]) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.push(DBOp::Insert { - col: col, - key: ekey, - value: DBValue::from_slice(value), - }); + self.ops.push(DBOp::Insert { col: col, key: ekey, value: DBValue::from_slice(value) }); } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. pub fn put_vec(&mut self, col: Option, key: &[u8], value: Bytes) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.push(DBOp::Insert { - col: col, - key: ekey, - value: DBValue::from_vec(value), - }); + self.ops.push(DBOp::Insert { col: col, key: ekey, value: DBValue::from_vec(value) }); } /// Delete value by key. pub fn delete(&mut self, col: Option, key: &[u8]) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.push(DBOp::Delete { - col: col, - key: ekey, - }); + self.ops.push(DBOp::Delete { col: col, key: ekey }); } } @@ -133,7 +113,9 @@ impl DBTransaction { /// implementation. pub trait KeyValueDB: Sync + Send { /// Helper to create a new transaction. - fn transaction(&self) -> DBTransaction { DBTransaction::new() } + fn transaction(&self) -> DBTransaction { + DBTransaction::new() + } /// Get a value by key. 
fn get(&self, col: Option, key: &[u8]) -> io::Result>; @@ -154,12 +136,14 @@ pub trait KeyValueDB: Sync + Send { fn flush(&self) -> io::Result<()>; /// Iterate over flushed data for a given column. - fn iter<'a>(&'a self, col: Option) - -> Box, Box<[u8]>)> + 'a>; + fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a>; /// Iterate over flushed data for a given column, starting from a given prefix. - fn iter_from_prefix<'a>(&'a self, col: Option, prefix: &'a [u8]) - -> Box, Box<[u8]>)> + 'a>; + fn iter_from_prefix<'a>( + &'a self, + col: Option, + prefix: &'a [u8], + ) -> Box, Box<[u8]>)> + 'a>; /// Attempt to replace this database with a new one located at the given path. fn restore(&self, new_db: &str) -> io::Result<()>; diff --git a/parity-bytes/CHANGELOG.md b/parity-bytes/CHANGELOG.md new file mode 100644 index 000000000..cc79bd068 --- /dev/null +++ b/parity-bytes/CHANGELOG.md @@ -0,0 +1,13 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +### Added +- Added no-std support (https://github.com/paritytech/parity-common/pull/154) diff --git a/parity-bytes/src/lib.rs b/parity-bytes/src/lib.rs index 41bfd6134..292a6e160 100644 --- a/parity-bytes/src/lib.rs +++ b/parity-bytes/src/lib.rs @@ -75,7 +75,7 @@ pub enum BytesRef<'a> { /// This is a reference to a vector Flexible(&'a mut Bytes), /// This is a reference to a slice - Fixed(&'a mut [u8]) + Fixed(&'a mut [u8]), } impl<'a> BytesRef<'a> { @@ -91,13 +91,13 @@ impl<'a> BytesRef<'a> { data.resize(offset, 0); data.extend_from_slice(input); wrote - }, + } BytesRef::Fixed(ref mut data) if offset < data.len() => { let max = min(data.len() - offset, input.len()); data[offset..(max + offset)].copy_from_slice(&input[..max]); max - }, - _ => 0 + } + _ => 0, } } } @@ -127,9 +127,9 @@ pub type Bytes = Vec; 
#[cfg(test)] mod tests { + use super::BytesRef; #[cfg(not(feature = "std"))] use alloc::vec; - use super::BytesRef; #[test] fn should_write_bytes_to_fixed_bytesref() { diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md new file mode 100644 index 000000000..927c9dc9c --- /dev/null +++ b/parity-crypto/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs index aa2e0fb78..e0830bb98 100644 --- a/parity-crypto/benches/bench.rs +++ b/parity-crypto/benches/bench.rs @@ -17,44 +17,40 @@ #[macro_use] extern crate criterion; -use criterion::{Criterion, Bencher}; use crate::parity_crypto::publickey::Generator; +use criterion::{Bencher, Criterion}; -criterion_group!( - benches, - input_len, - ecdh_agree, -); +criterion_group!(benches, input_len, ecdh_agree,); criterion_main!(benches); /// general benches for multiple input size fn input_len(c: &mut Criterion) { - - c.bench_function_over_inputs("ripemd", + c.bench_function_over_inputs( + "ripemd", |b: &mut Bencher, size: &usize| { let data = vec![0u8; *size]; b.iter(|| parity_crypto::digest::ripemd160(&data[..])); }, - vec![100, 500, 1_000, 10_000, 100_000] + vec![100, 500, 1_000, 10_000, 100_000], ); - c.bench_function_over_inputs("aes_ctr", + c.bench_function_over_inputs( + "aes_ctr", |b: &mut Bencher, size: &usize| { let data = vec![0u8; *size]; let mut dest = vec![0; *size]; let k = [0; 16]; let iv = [0; 16]; - b.iter(||{ + b.iter(|| { parity_crypto::aes::encrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); // same as encrypt but add it just in case parity_crypto::aes::decrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); }); }, - vec![100, 500, 1_000, 10_000, 100_000] + vec![100, 500, 1_000, 10_000, 100_000], ); - } fn ecdh_agree(c: &mut Criterion) { @@ -63,4 +59,4 @@ fn ecdh_agree(c: &mut 
Criterion) { let secret = keypair.secret().clone(); c.bench_function("ecdh_agree", move |b| b.iter(|| parity_crypto::publickey::ecdh::agree(&secret, &public))); -} \ No newline at end of file +} diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs index e13300524..de643dceb 100644 --- a/parity-crypto/src/aes.rs +++ b/parity-crypto/src/aes.rs @@ -14,24 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use aes::block_cipher_trait::generic_array::GenericArray; +use aes::{Aes128, Aes256}; +use aes_ctr::stream_cipher::{NewStreamCipher, SyncStreamCipher}; use block_modes::{ - BlockMode, - Cbc, - Ecb, - block_padding::{Pkcs7, ZeroPadding} + block_padding::{Pkcs7, ZeroPadding}, + BlockMode, Cbc, Ecb, }; -use aes_ctr::stream_cipher::{ NewStreamCipher, SyncStreamCipher }; -use aes::{ Aes128, Aes256 }; -use aes::block_cipher_trait::generic_array::GenericArray; use crate::error::SymmError; - /// One time encoder/decoder for Ecb mode Aes256 with zero padding pub struct AesEcb256(Ecb); impl AesEcb256 { - /// New encoder/decoder, no iv for ecb pub fn new(key: &[u8]) -> Result { Ok(AesEcb256(Ecb::new_var(key, &[])?)) @@ -40,7 +36,7 @@ impl AesEcb256 { /// Encrypt data in place without padding. The data length must be a multiple /// of the block size. 
pub fn encrypt(self, content: &mut [u8]) -> Result<(), SymmError> { - let len = content.len(); + let len = content.len(); self.0.encrypt(content, len)?; Ok(()) } @@ -53,29 +49,25 @@ impl AesEcb256 { } } - /// Reusable encoder/decoder for Aes256 in Ctr mode and no padding pub struct AesCtr256(aes_ctr::Aes256Ctr); impl AesCtr256 { - /// New encoder/decoder pub fn new(key: &[u8], iv: &[u8]) -> Result { - Ok(AesCtr256( - aes_ctr::Aes256Ctr::new(GenericArray::from_slice(key), GenericArray::from_slice(iv)) - )) + Ok(AesCtr256(aes_ctr::Aes256Ctr::new(GenericArray::from_slice(key), GenericArray::from_slice(iv)))) } /// In place encrypt a content without padding, the content length must be a multiple /// of the block size. - pub fn encrypt(&mut self, content: &mut[u8]) -> Result<(), SymmError> { + pub fn encrypt(&mut self, content: &mut [u8]) -> Result<(), SymmError> { self.0.try_apply_keystream(content)?; Ok(()) } /// In place decrypt a content without padding, the content length must be a multiple /// of the block size. - pub fn decrypt(&mut self, content: &mut[u8]) -> Result<(), SymmError> { + pub fn decrypt(&mut self, content: &mut [u8]) -> Result<(), SymmError> { self.0.try_apply_keystream(content)?; Ok(()) } @@ -87,14 +79,10 @@ impl AesCtr256 { /// An error is returned if the input lengths are invalid. /// If possible prefer `inplace_encrypt_128_ctr` to avoid a slice copy. pub fn encrypt_128_ctr(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); + let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); &mut dest[..plain.len()].copy_from_slice(plain); encryptor.try_apply_keystream(dest)?; Ok(()) - } /// Encrypt a message (CTR mode). 
@@ -102,13 +90,9 @@ pub fn encrypt_128_ctr(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Re /// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. /// An error is returned if the input lengths are invalid. pub fn inplace_encrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); + let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); encryptor.try_apply_keystream(data)?; Ok(()) - } /// Decrypt a message (CTR mode). @@ -117,10 +101,7 @@ pub fn inplace_encrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<( /// An error is returned if the input lengths are invalid. /// If possible prefer `inplace_decrypt_128_ctr` instead. pub fn decrypt_128_ctr(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); + let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); &mut dest[..encrypted.len()].copy_from_slice(encrypted); encryptor.try_apply_keystream(dest)?; @@ -132,16 +113,12 @@ pub fn decrypt_128_ctr(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) - /// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. /// An error is returned if the input lengths are invalid. pub fn inplace_decrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new( - GenericArray::from_slice(k), - GenericArray::from_slice(iv), - ); + let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); encryptor.try_apply_keystream(data)?; Ok(()) } - /// Decrypt a message (CBC mode). 
/// /// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. @@ -149,13 +126,10 @@ pub fn inplace_decrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<( pub fn decrypt_128_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result { let encryptor = Cbc::::new_var(k, iv)?; &mut dest[..encrypted.len()].copy_from_slice(encrypted); - let unpad_length = { - encryptor.decrypt(&mut dest[..encrypted.len()])?.len() - }; + let unpad_length = { encryptor.decrypt(&mut dest[..encrypted.len()])?.len() }; Ok(unpad_length) } - #[cfg(test)] mod tests { @@ -170,27 +144,39 @@ mod tests { } #[test] - pub fn test_aes_short() -> Result<(),SymmError> { - let key = [97, 110, 121, 99, 111, 110, 116, 101, 110, 116, 116, 111, 114, 101, 97, 99, 104, 49, 50, 56, 98, 105, 116, 115, 105, 122, 101, 10]; - let salt = [109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, 108, 108, 115, 111, 109, 109, 101, 98, 121, 116, 101, 108, 101, 110, 103, 116, 104, 10]; - let content = [83, 111, 109, 101, 32, 99, 111, 110, 116, 101, 110, 116, 32, 116, 111, 32, 116, 101, 115, 116, - 32, 97, 101, 115, 44, 10, 110, 111, 116, 32, 116, 111, 32, 109, 117, 99, 104, 32, 44, 32, 111, 110, 108, 121, - 32, 118, 101, 114, 121, 32, 98, 97, 115, 105, 99, 32, 116, 101, 115, 116, 32, 116, 111, 32, 97, 118, 111, 105, - 100, 32, 111, 98, 118, 105, 111, 117, 115, 32, 114, 101, 103, 114, 101, 115, 115, 105, 111, 110, 32, 119, 104, - 101, 110, 32, 115, 119, 105, 116, 99, 104, 105, 110, 103, 32, 108, 105, 98, 115, 46, 10]; - let ctr_enc = [65, 55, 246, 75, 24, 117, 30, 233, 218, 139, 91, 251, 251, 179, 171, 69, 60, 244, 249, 44, 238, 60, - 10, 66, 71, 10, 199, 111, 54, 24, 124, 223, 153, 250, 159, 154, 164, 109, 232, 82, 20, 199, 182, 40, 174, 104, 64, - 203, 236, 94, 222, 184, 117, 54, 234, 189, 253, 122, 135, 121, 100, 44, 227, 241, 123, 120, 110, 188, 109, 148, 112, - 160, 131, 205, 116, 104, 232, 8, 22, 170, 80, 231, 155, 246, 255, 115, 101, 5, 234, 104, 220, 
199, 192, 166, 181, 156, - 113, 255, 187, 51, 38, 128, 75, 29, 237, 178, 205, 98, 101, 110]; - let cbc_enc = [167, 248, 5, 90, 11, 140, 215, 138, 165, 125, 137, 76, 47, 243, 191, 48, 183, 247, 109, 86, 24, 45, - 81, 215, 0, 51, 221, 185, 131, 97, 234, 189, 244, 255, 107, 210, 70, 60, 41, 221, 43, 137, 185, 166, 42, 65, 18, 200, - 151, 233, 255, 192, 109, 25, 105, 115, 161, 209, 126, 235, 99, 192, 241, 241, 19, 249, 87, 244, 28, 146, 186, 189, 108, - 9, 243, 132, 4, 105, 53, 162, 8, 235, 84, 107, 213, 59, 158, 113, 227, 120, 162, 50, 237, 123, 70, 187, 83, 73, 146, 13, - 44, 191, 53, 4, 125, 207, 176, 45, 8, 153, 175, 198]; - let mut dest = vec![0;110]; - let mut dest_padded = vec![0;112]; - let mut dest_padded2 = vec![0;128]; // TODO RustLib need an extra 16bytes in dest : looks extra buggy but function is not currently use (keep it private for now) + pub fn test_aes_short() -> Result<(), SymmError> { + let key = [ + 97, 110, 121, 99, 111, 110, 116, 101, 110, 116, 116, 111, 114, 101, 97, 99, 104, 49, 50, 56, 98, 105, 116, + 115, 105, 122, 101, 10, + ]; + let salt = [ + 109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, 108, 108, 115, 111, 109, 109, 101, 98, + 121, 116, 101, 108, 101, 110, 103, 116, 104, 10, + ]; + let content = [ + 83, 111, 109, 101, 32, 99, 111, 110, 116, 101, 110, 116, 32, 116, 111, 32, 116, 101, 115, 116, 32, 97, 101, + 115, 44, 10, 110, 111, 116, 32, 116, 111, 32, 109, 117, 99, 104, 32, 44, 32, 111, 110, 108, 121, 32, 118, + 101, 114, 121, 32, 98, 97, 115, 105, 99, 32, 116, 101, 115, 116, 32, 116, 111, 32, 97, 118, 111, 105, 100, + 32, 111, 98, 118, 105, 111, 117, 115, 32, 114, 101, 103, 114, 101, 115, 115, 105, 111, 110, 32, 119, 104, + 101, 110, 32, 115, 119, 105, 116, 99, 104, 105, 110, 103, 32, 108, 105, 98, 115, 46, 10, + ]; + let ctr_enc = [ + 65, 55, 246, 75, 24, 117, 30, 233, 218, 139, 91, 251, 251, 179, 171, 69, 60, 244, 249, 44, 238, 60, 10, 66, + 71, 10, 199, 111, 54, 24, 124, 223, 153, 250, 159, 154, 164, 109, 232, 
82, 20, 199, 182, 40, 174, 104, 64, + 203, 236, 94, 222, 184, 117, 54, 234, 189, 253, 122, 135, 121, 100, 44, 227, 241, 123, 120, 110, 188, 109, + 148, 112, 160, 131, 205, 116, 104, 232, 8, 22, 170, 80, 231, 155, 246, 255, 115, 101, 5, 234, 104, 220, + 199, 192, 166, 181, 156, 113, 255, 187, 51, 38, 128, 75, 29, 237, 178, 205, 98, 101, 110, + ]; + let cbc_enc = [ + 167, 248, 5, 90, 11, 140, 215, 138, 165, 125, 137, 76, 47, 243, 191, 48, 183, 247, 109, 86, 24, 45, 81, + 215, 0, 51, 221, 185, 131, 97, 234, 189, 244, 255, 107, 210, 70, 60, 41, 221, 43, 137, 185, 166, 42, 65, + 18, 200, 151, 233, 255, 192, 109, 25, 105, 115, 161, 209, 126, 235, 99, 192, 241, 241, 19, 249, 87, 244, + 28, 146, 186, 189, 108, 9, 243, 132, 4, 105, 53, 162, 8, 235, 84, 107, 213, 59, 158, 113, 227, 120, 162, + 50, 237, 123, 70, 187, 83, 73, 146, 13, 44, 191, 53, 4, 125, 207, 176, 45, 8, 153, 175, 198, + ]; + let mut dest = vec![0; 110]; + let mut dest_padded = vec![0; 112]; + let mut dest_padded2 = vec![0; 128]; // TODO RustLib need an extra 16bytes in dest : looks extra buggy but function is not currently use (keep it private for now) encrypt_128_cbc(&key[..16], &salt[..16], &content, &mut dest_padded2)?; assert!(&dest_padded2[..112] == &cbc_enc[..]); encrypt_128_ctr(&key[..16], &salt[..16], &content, &mut dest)?; diff --git a/parity-crypto/src/digest.rs b/parity-crypto/src/digest.rs index 1851ac01d..caf57dd0c 100644 --- a/parity-crypto/src/digest.rs +++ b/parity-crypto/src/digest.rs @@ -17,7 +17,10 @@ use std::marker::PhantomData; use std::ops::Deref; -use digest::generic_array::{GenericArray, typenum::{U20, U32, U64}}; +use digest::generic_array::{ + typenum::{U20, U32, U64}, + GenericArray, +}; use sha2::Digest as RDigest; /// The message digest. 
@@ -74,7 +77,7 @@ pub struct Hasher(Inner, PhantomData); enum Inner { Sha256(sha2::Sha256), Sha512(sha2::Sha512), - Ripemd160(ripemd160::Ripemd160) + Ripemd160(ripemd160::Ripemd160), } impl Hasher { @@ -98,29 +101,17 @@ impl Hasher { impl Hasher { pub fn update(&mut self, data: &[u8]) { match self.0 { - Inner::Sha256(ref mut ctx) => { - ctx.input(data) - }, - Inner::Sha512(ref mut ctx) => { - ctx.input(data) - }, - Inner::Ripemd160(ref mut ctx) => { - ctx.input(data) - } + Inner::Sha256(ref mut ctx) => ctx.input(data), + Inner::Sha512(ref mut ctx) => ctx.input(data), + Inner::Ripemd160(ref mut ctx) => ctx.input(data), } } pub fn finish(self) -> Digest { match self.0 { - Inner::Sha256(ctx) => { - Digest(InnerDigest::Sha256(ctx.result()), PhantomData) - }, - Inner::Sha512(ctx) => { - Digest(InnerDigest::Sha512(ctx.result()), PhantomData) - }, - Inner::Ripemd160(ctx) => { - Digest(InnerDigest::Ripemd160(ctx.result()), PhantomData) - } + Inner::Sha256(ctx) => Digest(InnerDigest::Sha256(ctx.result()), PhantomData), + Inner::Sha512(ctx) => Digest(InnerDigest::Sha512(ctx.result()), PhantomData), + Inner::Ripemd160(ctx) => Digest(InnerDigest::Ripemd160(ctx.result()), PhantomData), } } } diff --git a/parity-crypto/src/error.rs b/parity-crypto/src/error.rs index 110c62cc5..16d67f504 100644 --- a/parity-crypto/src/error.rs +++ b/parity-crypto/src/error.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::{fmt, result, error::Error as StdError}; +use std::{error::Error as StdError, fmt, result}; #[derive(Debug)] pub enum Error { @@ -74,7 +74,7 @@ impl StdError for SymmError { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { match self { - Error::Scrypt(err)=> write!(f, "scrypt error: {}", err), + Error::Scrypt(err) => write!(f, "scrypt error: {}", err), Error::Symm(err) => write!(f, "symm error: {}", err), } } @@ -103,7 +103,7 @@ impl fmt::Display for SymmError { impl Into for Error { fn into(self) -> std::io::Error { - std::io::Error::new(std::io::ErrorKind::Other, format!("Crypto error: {}",self)) + std::io::Error::new(std::io::ErrorKind::Other, format!("Crypto error: {}", self)) } } @@ -148,4 +148,3 @@ impl From for Error { Error::Symm(e) } } - diff --git a/parity-crypto/src/hmac/mod.rs b/parity-crypto/src/hmac/mod.rs index 571df7b59..2b082dd45 100644 --- a/parity-crypto/src/hmac/mod.rs +++ b/parity-crypto/src/hmac/mod.rs @@ -17,7 +17,10 @@ use std::marker::PhantomData; use std::ops::Deref; -use digest::generic_array::{GenericArray, typenum::{U32, U64}}; +use digest::generic_array::{ + typenum::{U32, U64}, + GenericArray, +}; use hmac::{Hmac, Mac as _}; use zeroize::Zeroize; @@ -77,19 +80,13 @@ enum KeyInner { impl SigKey { pub fn sha256(key: &[u8]) -> SigKey { - SigKey( - KeyInner::Sha256(DisposableBox::from_slice(key)), - PhantomData - ) + SigKey(KeyInner::Sha256(DisposableBox::from_slice(key)), PhantomData) } } impl SigKey { pub fn sha512(key: &[u8]) -> SigKey { - SigKey( - KeyInner::Sha512(DisposableBox::from_slice(key)), - PhantomData - ) + SigKey(KeyInner::Sha512(DisposableBox::from_slice(key)), PhantomData) } } @@ -111,23 +108,14 @@ enum SignerInner { impl Signer { pub fn with(key: &SigKey) -> Signer { match &key.0 { - KeyInner::Sha256(key_bytes) => { - Signer( - SignerInner::Sha256( - Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed") - ), - PhantomData - ) - }, - 
KeyInner::Sha512(key_bytes) => { - Signer( - SignerInner::Sha512( - Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed") - ), PhantomData - ) - }, + KeyInner::Sha256(key_bytes) => Signer( + SignerInner::Sha256(Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed")), + PhantomData, + ), + KeyInner::Sha512(key_bytes) => Signer( + SignerInner::Sha512(Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed")), + PhantomData, + ), } } @@ -151,19 +139,13 @@ pub struct VerifyKey(KeyInner, PhantomData); impl VerifyKey { pub fn sha256(key: &[u8]) -> VerifyKey { - VerifyKey( - KeyInner::Sha256(DisposableBox::from_slice(key)), - PhantomData - ) + VerifyKey(KeyInner::Sha256(DisposableBox::from_slice(key)), PhantomData) } } impl VerifyKey { pub fn sha512(key: &[u8]) -> VerifyKey { - VerifyKey( - KeyInner::Sha512(DisposableBox::from_slice(key)), - PhantomData - ) + VerifyKey(KeyInner::Sha512(DisposableBox::from_slice(key)), PhantomData) } } @@ -171,17 +153,15 @@ impl VerifyKey { pub fn verify(key: &VerifyKey, data: &[u8], sig: &[u8]) -> bool { match &key.0 { KeyInner::Sha256(key_bytes) => { - let mut ctx = Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed"); + let mut ctx = Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed"); ctx.input(data); ctx.verify(sig).is_ok() - }, + } KeyInner::Sha512(key_bytes) => { - let mut ctx = Hmac::::new_varkey(&key_bytes.0) - .expect("always returns Ok; qed"); + let mut ctx = Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed"); ctx.input(data); ctx.verify(sig).is_ok() - }, + } } } diff --git a/parity-crypto/src/hmac/test.rs b/parity-crypto/src/hmac/test.rs index 9e0c34e12..1633cf37c 100644 --- a/parity-crypto/src/hmac/test.rs +++ b/parity-crypto/src/hmac/test.rs @@ -20,10 +20,10 @@ use hex_literal::hex; #[test] fn simple_mac_and_verify() { let input = b"Some bytes"; - let big_input = vec![7u8;2000]; + let big_input = vec![7u8; 2000]; - let key1 = vec![3u8;64]; - let 
key2 = vec![4u8;128]; + let key1 = vec![3u8; 64]; + let key2 = vec![4u8; 128]; let sig_key1 = SigKey::sha256(&key1[..]); let sig_key2 = SigKey::sha512(&key2[..]); @@ -32,14 +32,27 @@ fn simple_mac_and_verify() { let mut signer2 = Signer::with(&sig_key2); signer1.update(&input[..]); - for i in 0 .. big_input.len() / 33 { - signer2.update(&big_input[i*33..(i+1)*33]); + for i in 0..big_input.len() / 33 { + signer2.update(&big_input[i * 33..(i + 1) * 33]); } - signer2.update(&big_input[(big_input.len() / 33)*33..]); + signer2.update(&big_input[(big_input.len() / 33) * 33..]); let sig1 = signer1.sign(); - assert_eq!(&sig1[..], [223, 208, 90, 69, 144, 95, 145, 180, 56, 155, 78, 40, 86, 238, 205, 81, 160, 245, 88, 145, 164, 67, 254, 180, 202, 107, 93, 249, 64, 196, 86, 225]); + assert_eq!( + &sig1[..], + [ + 223, 208, 90, 69, 144, 95, 145, 180, 56, 155, 78, 40, 86, 238, 205, 81, 160, 245, 88, 145, 164, 67, 254, + 180, 202, 107, 93, 249, 64, 196, 86, 225 + ] + ); let sig2 = signer2.sign(); - assert_eq!(&sig2[..], &[29, 63, 46, 122, 27, 5, 241, 38, 86, 197, 91, 79, 33, 107, 152, 195, 118, 221, 117, 119, 84, 114, 46, 65, 243, 157, 105, 12, 147, 176, 190, 37, 210, 164, 152, 8, 58, 243, 59, 206, 80, 10, 230, 197, 255, 110, 191, 180, 93, 22, 255, 0, 99, 79, 237, 229, 209, 199, 125, 83, 15, 179, 134, 89][..]); + assert_eq!( + &sig2[..], + &[ + 29, 63, 46, 122, 27, 5, 241, 38, 86, 197, 91, 79, 33, 107, 152, 195, 118, 221, 117, 119, 84, 114, 46, 65, + 243, 157, 105, 12, 147, 176, 190, 37, 210, 164, 152, 8, 58, 243, 59, 206, 80, 10, 230, 197, 255, 110, 191, + 180, 93, 22, 255, 0, 99, 79, 237, 229, 209, 199, 125, 83, 15, 179, 134, 89 + ][..] 
+ ); assert_eq!(&sig1[..], &sign(&sig_key1, &input[..])[..]); assert_eq!(&sig2[..], &sign(&sig_key2, &big_input[..])[..]); let verif_key1 = VerifyKey::sha256(&key1[..]); @@ -48,12 +61,7 @@ fn simple_mac_and_verify() { assert!(verify(&verif_key2, &big_input[..], &sig2[..])); } -fn check_test_vector( - key: &[u8], - data: &[u8], - expected_256: &[u8], - expected_512: &[u8], -) { +fn check_test_vector(key: &[u8], data: &[u8], expected_256: &[u8], expected_512: &[u8]) { // Sha-256 let sig_key = SigKey::sha256(&key); let mut signer = Signer::with(&sig_key); @@ -62,7 +70,7 @@ fn check_test_vector( assert_eq!(&signature[..], expected_256); assert_eq!(&signature[..], &sign(&sig_key, data)[..]); let ver_key = VerifyKey::sha256(&key); - assert!(verify(&ver_key, data,&signature)); + assert!(verify(&ver_key, data, &signature)); // Sha-512 let sig_key = SigKey::sha512(&key); @@ -72,7 +80,7 @@ fn check_test_vector( assert_eq!(&signature[..], expected_512); assert_eq!(&signature[..], &sign(&sig_key, data)[..]); let ver_key = VerifyKey::sha512(&key); - assert!(verify(&ver_key, data,&signature)); + assert!(verify(&ver_key, data, &signature)); } #[test] @@ -83,64 +91,83 @@ fn ietf_test_vectors() { check_test_vector( &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"), &hex!("4869205468657265"), - &hex!(" + &hex!( + " b0344c61d8db38535ca8afceaf0bf12b - 881dc200c9833da726e9376c2e32cff7"), - &hex!(" + 881dc200c9833da726e9376c2e32cff7" + ), + &hex!( + " 87aa7cdea5ef619d4ff0b4241a1d6cb0 2379f4e2ce4ec2787ad0b30545e17cde daa833b7d6b8a702038b274eaea3f4e4 - be9d914eeb61f1702e696c203a126854") + be9d914eeb61f1702e696c203a126854" + ), ); // Test Case 2 check_test_vector( &hex!("4a656665"), &hex!("7768617420646f2079612077616e7420666f72206e6f7468696e673f"), - &hex!(" + &hex!( + " 5bdcc146bf60754e6a042426089575c7 - 5a003f089d2739839dec58b964ec3843"), - &hex!(" + 5a003f089d2739839dec58b964ec3843" + ), + &hex!( + " 164b7a7bfcf819e2e395fbe73b56e0a3 87bd64222e831fd610270cd7ea250554 
9758bf75c05a994a6d034f65f8f0e6fd - caeab1a34d4a6b4b636e070a38bce737") + caeab1a34d4a6b4b636e070a38bce737" + ), ); // Test Case 3 check_test_vector( &hex!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), &hex!("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"), - &hex!(" + &hex!( + " 773ea91e36800e46854db8ebd09181a7 - 2959098b3ef8c122d9635514ced565fe"), - &hex!(" + 2959098b3ef8c122d9635514ced565fe" + ), + &hex!( + " fa73b0089d56a284efb0f0756c890be9 b1b5dbdd8ee81a3655f83e33b2279d39 bf3e848279a722c806b485a47e67c807 - b946a337bee8942674278859e13292fb") + b946a337bee8942674278859e13292fb" + ), ); // Test Case 4 check_test_vector( &hex!("0102030405060708090a0b0c0d0e0f10111213141516171819"), - &hex!(" + &hex!( + " cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcd"), - &hex!(" + cdcd" + ), + &hex!( + " 82558a389a443c0ea4cc819899f2083a - 85f0faa3e578f8077a2e3ff46729665b"), - &hex!(" + 85f0faa3e578f8077a2e3ff46729665b" + ), + &hex!( + " b0ba465637458c6990e5a8c5f61d4af7 e576d97ff94b872de76f8050361ee3db a91ca5c11aa25eb4d679275cc5788063 - a5f19741120c4f2de2adebeb10a298dd") + a5f19741120c4f2de2adebeb10a298dd" + ), ); // Test Case 6 check_test_vector( - &hex!(" + &hex!( + " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa @@ -149,25 +176,33 @@ fn ietf_test_vectors() { aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaa"), - &hex!(" + aaaaaa" + ), + &hex!( + " 54657374205573696e67204c61726765 72205468616e20426c6f636b2d53697a 65204b6579202d2048617368204b6579 - 204669727374"), - &hex!(" + 204669727374" + ), + &hex!( + " 60e431591ee0b67f0d8a26aacbf5b77f - 8e0bc6213728c5140546040f0ee37f54"), - &hex!(" + 8e0bc6213728c5140546040f0ee37f54" + ), + &hex!( + " 80b24263c7c1a3ebb71493c1dd7be8b4 9b46d1f41b4aeec1121b013783f8f352 6b56d037e05f2598bd0fd2215d6a1e52 - 
95e64f73f63f0aec8b915a985d786598") + 95e64f73f63f0aec8b915a985d786598" + ), ); // Test Case 7 check_test_vector( - &hex!(" + &hex!( + " aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa @@ -176,8 +211,10 @@ fn ietf_test_vectors() { aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaa"), - &hex!(" + aaaaaa" + ), + &hex!( + " 54686973206973206120746573742075 73696e672061206c6172676572207468 616e20626c6f636b2d73697a65206b65 @@ -187,15 +224,20 @@ fn ietf_test_vectors() { 647320746f2062652068617368656420 6265666f7265206265696e6720757365 642062792074686520484d414320616c - 676f726974686d2e"), - &hex!(" + 676f726974686d2e" + ), + &hex!( + " 9b09ffa71b942fcb27635fbcd5b0e944 - bfdc63644f0713938a7f51535c3a35e2"), - &hex!(" + bfdc63644f0713938a7f51535c3a35e2" + ), + &hex!( + " e37b6a775dc87dbaa4dfa9f96e5e3ffd debd71f8867289865df5a32d20cdc944 b6022cac3c4982b10d5eeb55c3e4de15 - 134676fb6de0446065c97440fa8c6a58") + 134676fb6de0446065c97440fa8c6a58" + ), ); } diff --git a/parity-crypto/src/lib.rs b/parity-crypto/src/lib.rs index 309c4c803..d1d146355 100644 --- a/parity-crypto/src/lib.rs +++ b/parity-crypto/src/lib.rs @@ -17,18 +17,18 @@ //! Crypto utils used by ethstore and network. 
pub mod aes; -pub mod error; -pub mod scrypt; pub mod digest; +pub mod error; pub mod hmac; pub mod pbkdf2; #[cfg(feature = "publickey")] pub mod publickey; +pub mod scrypt; pub use crate::error::Error; -use tiny_keccak::Keccak; use subtle::ConstantTimeEq; +use tiny_keccak::Keccak; pub const KEY_LENGTH: usize = 32; pub const KEY_ITERATIONS: usize = 10240; @@ -38,10 +38,15 @@ pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; pub const DEFAULT_MAC: [u8; 2] = [0, 0]; pub trait Keccak256 { - fn keccak256(&self) -> T where T: Sized; + fn keccak256(&self) -> T + where + T: Sized; } -impl Keccak256<[u8; 32]> for T where T: AsRef<[u8]> { +impl Keccak256<[u8; 32]> for T +where + T: AsRef<[u8]>, +{ fn keccak256(&self) -> [u8; 32] { let mut keccak = Keccak::new_keccak256(); let mut result = [0u8; 32]; diff --git a/parity-crypto/src/pbkdf2/test.rs b/parity-crypto/src/pbkdf2/test.rs index 0aca66967..ff8cc685c 100644 --- a/parity-crypto/src/pbkdf2/test.rs +++ b/parity-crypto/src/pbkdf2/test.rs @@ -18,10 +18,13 @@ use super::*; #[test] fn basic_test() { - let mut dest = [0;32]; - let salt = [5;32]; - let secret = [7;32]; + let mut dest = [0; 32]; + let salt = [5; 32]; + let secret = [7; 32]; sha256(3, Salt(&salt[..]), Secret(&secret[..]), &mut dest); - let res = [242, 33, 31, 124, 36, 223, 179, 185, 206, 175, 190, 253, 85, 33, 23, 126, 141, 29, 23, 97, 66, 63, 51, 196, 27, 255, 135, 206, 74, 137, 172, 87]; + let res = [ + 242, 33, 31, 124, 36, 223, 179, 185, 206, 175, 190, 253, 85, 33, 23, 126, 141, 29, 23, 97, 66, 63, 51, 196, 27, + 255, 135, 206, 74, 137, 172, 87, + ]; assert_eq!(res, dest); } diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs index cbc2e3f81..1aa55db7e 100644 --- a/parity-crypto/src/publickey/ec_math_utils.rs +++ b/parity-crypto/src/publickey/ec_math_utils.rs @@ -16,26 +16,21 @@ //! 
Multiple primitives for work with public and secret keys and with secp256k1 curve points -use super::{SECP256K1, Public, Secret, Error}; -use secp256k1::key; -use secp256k1::constants::{CURVE_ORDER as SECP256K1_CURVE_ORDER}; -use ethereum_types::{BigEndianHash as _, U256, H256}; +use super::{Error, Public, Secret, SECP256K1}; +use ethereum_types::{BigEndianHash as _, H256, U256}; use lazy_static::lazy_static; +use secp256k1::constants::CURVE_ORDER as SECP256K1_CURVE_ORDER; +use secp256k1::key; /// Generation point array combined from X and Y coordinates /// Equivalent to uncompressed form, see https://tools.ietf.org/id/draft-jivsov-ecc-compact-05.html#rfc.section.3 pub const BASE_POINT_BYTES: [u8; 65] = [ - 0x4, - // The X coordinate of the generator - 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, - 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, - 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, - 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, + 0x4, // The X coordinate of the generator + 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, 0x02, 0x9b, 0xfc, + 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, // The Y coordinate of the generator - 0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, - 0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8, - 0xfd, 0x17, 0xb4, 0x48, 0xa6, 0x85, 0x54, 0x19, - 0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8, + 0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, 0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8, 0xfd, 0x17, 0xb4, + 0x48, 0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8, ]; lazy_static! { @@ -44,8 +39,7 @@ lazy_static! { /// Whether the public key is valid. 
pub fn public_is_valid(public: &Public) -> bool { - to_secp256k1_public(public).ok() - .map_or(false, |p| p.is_valid()) + to_secp256k1_public(public).ok().map_or(false, |p| p.is_valid()) } /// In-place multiply public key by secret key (EC point * scalar) @@ -87,8 +81,8 @@ pub fn public_negate(public: &mut Public) -> Result<(), Error> { /// Return the generation point (aka base point) of secp256k1 pub fn generation_point() -> Public { - let public_key = key::PublicKey::from_slice(&SECP256K1, &BASE_POINT_BYTES) - .expect("constructed using constants; qed"); + let public_key = + key::PublicKey::from_slice(&SECP256K1, &BASE_POINT_BYTES).expect("constructed using constants; qed"); let mut public = Public::default(); set_public(&mut public, &public_key); public @@ -111,8 +105,8 @@ fn set_public(public: &mut Public, key_public: &key::PublicKey) { #[cfg(test)] mod tests { - use super::super::{Random, Generator, Secret}; - use super::{public_add, public_sub, public_negate, public_is_valid, generation_point, public_mul_secret}; + use super::super::{Generator, Random, Secret}; + use super::{generation_point, public_add, public_is_valid, public_mul_secret, public_negate, public_sub}; use std::str::FromStr; #[test] diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs index 73d25491c..ab22c2a09 100644 --- a/parity-crypto/src/publickey/ecdh.rs +++ b/parity-crypto/src/publickey/ecdh.rs @@ -16,8 +16,8 @@ //! ECDH key agreement scheme implemented as a free function. 
+use super::{Error, Public, Secret, SECP256K1}; use secp256k1::{self, ecdh, key}; -use super::{Error, Secret, Public, SECP256K1}; /// Agree on a shared secret pub fn agree(secret: &Secret, public: &Public) -> Result { @@ -32,6 +32,5 @@ pub fn agree(secret: &Secret, public: &Public) -> Result { let sec = key::SecretKey::from_slice(context, secret.as_bytes())?; let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec); - Secret::import_key(&shared[0..32]) - .map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) + Secret::import_key(&shared[0..32]).map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) } diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs index 421fa9b61..6801adf10 100644 --- a/parity-crypto/src/publickey/ecdsa_signature.rs +++ b/parity-crypto/src/publickey/ecdsa_signature.rs @@ -16,16 +16,16 @@ //! Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm -use std::ops::{Deref, DerefMut}; +use super::{public_to_address, Address, Error, Message, Public, Secret, SECP256K1}; +use ethereum_types::{H256, H520}; +use rustc_hex::{FromHex, ToHex}; +use secp256k1::key::{PublicKey, SecretKey}; +use secp256k1::{Error as SecpError, Message as SecpMessage, RecoverableSignature, RecoveryId}; use std::cmp::PartialEq; use std::fmt; -use std::str::FromStr; use std::hash::{Hash, Hasher}; -use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError}; -use secp256k1::key::{SecretKey, PublicKey}; -use rustc_hex::{ToHex, FromHex}; -use ethereum_types::{H520, H256}; -use super::{Secret, Public, SECP256K1, Message, public_to_address, Address, Error}; +use std::ops::{Deref, DerefMut}; +use std::str::FromStr; /// Signature encoded as RSV components #[repr(C)] @@ -81,10 +81,8 @@ impl Signature { /// This condition may be required by some verification algorithms pub fn is_low_s(&self) -> bool { const 
LOW_SIG_THRESHOLD: H256 = H256([ - 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x5D, 0x57, 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, - 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0, + 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x5D, 0x57, + 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0, ]); H256::from_slice(self.s()) <= LOW_SIG_THRESHOLD } @@ -97,22 +95,16 @@ impl Signature { /// used here as the upper bound for a valid (r, s, v) tuple pub fn is_valid(&self) -> bool { const UPPER_BOUND: H256 = H256([ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, + 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41, ]); const ONE: H256 = H256([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, ]); let r = H256::from_slice(self.r()); let s = H256::from_slice(self.s()); - self.v() <= 1 && - r < UPPER_BOUND && r >= ONE && - s < UPPER_BOUND && s >= ONE + self.v() <= 1 && r < UPPER_BOUND && r >= ONE && s < UPPER_BOUND && s >= ONE } } @@ -125,7 +117,7 @@ impl PartialEq for Signature { } // manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`. -impl Eq for Signature { } +impl Eq for Signature {} // also manual for the same reason, but the pretty printing might be useful. 
impl fmt::Debug for Signature { @@ -134,7 +126,7 @@ impl fmt::Debug for Signature { .field("r", &self.0[0..32].to_hex::()) .field("s", &self.0[32..64].to_hex::()) .field("v", &self.0[64..65].to_hex::()) - .finish() + .finish() } } @@ -153,8 +145,8 @@ impl FromStr for Signature { let mut data = [0; 65]; data.copy_from_slice(&hex[0..65]); Ok(Signature(data)) - }, - _ => Err(Error::InvalidSignature) + } + _ => Err(Error::InvalidSignature), } } } @@ -233,7 +225,8 @@ pub fn sign(secret: &Secret, message: &Message) -> Result { /// Performs verification of the signature for the given message with corresponding public key pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result { let context = &SECP256K1; - let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let rsig = + RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; let sig = rsig.to_standard(context); let pdata: [u8; 65] = { @@ -246,7 +239,7 @@ pub fn verify_public(public: &Public, signature: &Signature, message: &Message) match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) { Ok(_) => Ok(true), Err(SecpError::IncorrectSignature) => Ok(false), - Err(x) => Err(Error::from(x)) + Err(x) => Err(Error::from(x)), } } @@ -260,7 +253,8 @@ pub fn verify_address(address: &Address, signature: &Signature, message: &Messag /// Recovers the public key from the signature for the message pub fn recover(signature: &Signature, message: &Message) -> Result { let context = &SECP256K1; - let rsig = RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let rsig = + RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; let serialized = pubkey.serialize_vec(context, 
false); @@ -271,9 +265,9 @@ pub fn recover(signature: &Signature, message: &Message) -> Result usize { 4 } + fn len() -> usize { + 4 + } fn store(&self, target: &mut [u8]) { let bytes = self.to_be_bytes(); @@ -57,15 +59,16 @@ impl From for Derivation { // See module's documentation for more details if index < (2 << 30) { Derivation::Soft(index) - } - else { + } else { Derivation::Hard(index) } } } impl Label for H256 { - fn len() -> usize { 32 } + fn len() -> usize { + 32 + } fn store(&self, target: &mut [u8]) { (&mut target[0..32]).copy_from_slice(self.as_bytes()); @@ -81,10 +84,7 @@ pub struct ExtendedSecret { impl ExtendedSecret { /// New extended key from given secret and chain code. pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret { - ExtendedSecret { - secret: secret, - chain_code: chain_code, - } + ExtendedSecret { secret: secret, chain_code: chain_code } } /// New extended key from given secret with the random chain code. @@ -100,7 +100,10 @@ impl ExtendedSecret { } /// Derive new private key - pub fn derive(&self, index: Derivation) -> ExtendedSecret where T: Label { + pub fn derive(&self, index: Derivation) -> ExtendedSecret + where + T: Label, + { let (derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index); let derived_secret = Secret::from(derived_key.0); @@ -128,17 +131,15 @@ impl ExtendedPublic { /// Create new extended public key from known secret pub fn from_secret(secret: &ExtendedSecret) -> Result { - Ok( - ExtendedPublic::new( - derivation::point(**secret.as_raw())?, - secret.chain_code.clone(), - ) - ) + Ok(ExtendedPublic::new(derivation::point(**secret.as_raw())?, secret.chain_code.clone())) } /// Derive new public key /// Operation is defined only for index belongs [0..2^31) - pub fn derive(&self, index: Derivation) -> Result where T: Label { + pub fn derive(&self, index: Derivation) -> Result + where + T: Label, + { let (derived_key, next_chain_code) = derivation::public(self.public, 
self.chain_code, index)?; Ok(ExtendedPublic::new(derived_key, next_chain_code)) } @@ -156,12 +157,9 @@ pub struct ExtendedKeyPair { impl ExtendedKeyPair { pub fn new(secret: Secret) -> Self { let extended_secret = ExtendedSecret::new(secret); - let extended_public = ExtendedPublic::from_secret(&extended_secret) - .expect("Valid `Secret` always produces valid public; qed"); - ExtendedKeyPair { - secret: extended_secret, - public: extended_public, - } + let extended_public = + ExtendedPublic::from_secret(&extended_secret).expect("Valid `Secret` always produces valid public; qed"); + ExtendedKeyPair { secret: extended_secret, public: extended_public } } pub fn with_code(secret: Secret, public: Public, chain_code: H256) -> Self { @@ -173,12 +171,9 @@ impl ExtendedKeyPair { pub fn with_secret(secret: Secret, chain_code: H256) -> Self { let extended_secret = ExtendedSecret::with_code(secret, chain_code); - let extended_public = ExtendedPublic::from_secret(&extended_secret) - .expect("Valid `Secret` always produces valid public; qed"); - ExtendedKeyPair { - secret: extended_secret, - public: extended_public, - } + let extended_public = + ExtendedPublic::from_secret(&extended_secret).expect("Valid `Secret` always produces valid public; qed"); + ExtendedKeyPair { secret: extended_secret, public: extended_public } } pub fn with_seed(seed: &[u8]) -> Result { @@ -197,13 +192,13 @@ impl ExtendedKeyPair { &self.public } - pub fn derive(&self, index: Derivation) -> Result where T: Label { + pub fn derive(&self, index: Derivation) -> Result + where + T: Label, + { let derived = self.secret.derive(index); - Ok(ExtendedKeyPair { - public: ExtendedPublic::from_secret(&derived)?, - secret: derived, - }) + Ok(ExtendedKeyPair { public: ExtendedPublic::from_secret(&derived)?, secret: derived }) } } @@ -211,12 +206,12 @@ impl ExtendedKeyPair { // Work is based on BIP0032 // https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki mod derivation { - use crate::{hmac, Keccak256}; - 
use super::super::SECP256K1; - use ethereum_types::{BigEndianHash, U256, U512, H512, H256}; - use secp256k1::key::{SecretKey, PublicKey}; use super::super::ec_math_utils::CURVE_ORDER; - use super::{Label, Derivation}; + use super::super::SECP256K1; + use super::{Derivation, Label}; + use crate::{hmac, Keccak256}; + use ethereum_types::{BigEndianHash, H256, H512, U256, U512}; + use secp256k1::key::{PublicKey, SecretKey}; use std::convert::TryInto; #[derive(Debug)] @@ -233,7 +228,10 @@ mod derivation { // // Can panic if passed `private_key` is not a valid secp256k1 private key // (outside of (0..curve_order()]) field - pub fn private(private_key: H256, chain_code: H256, index: Derivation) -> (H256, H256) where T: Label { + pub fn private(private_key: H256, chain_code: H256, index: Derivation) -> (H256, H256) + where + T: Label, + { match index { Derivation::Soft(index) => private_soft(private_key, chain_code, index), Derivation::Hard(index) => private_hard(private_key, chain_code, index), @@ -258,13 +256,16 @@ mod derivation { // Can panic if passed `private_key` is not a valid secp256k1 private key // (outside of (0..curve_order()]) field - fn private_soft(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label { + fn private_soft(private_key: H256, chain_code: H256, index: T) -> (H256, H256) + where + T: Label, + { let mut data = vec![0u8; 33 + T::len()]; - let sec_private = SecretKey::from_slice(&SECP256K1, private_key.as_bytes()) - .expect("Caller should provide valid private key"); - let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private) - .expect("Caller should provide valid private key"); + let sec_private = + SecretKey::from_slice(&SECP256K1, private_key.as_bytes()).expect("Caller should provide valid private key"); + let sec_public = + PublicKey::from_secret_key(&SECP256K1, &sec_private).expect("Caller should provide valid private key"); let public_serialized = sec_public.serialize_vec(&SECP256K1, true); // curve point 
(compressed public key) -- index @@ -278,7 +279,10 @@ mod derivation { // Deterministic derivation of the key using secp256k1 elliptic curve // This is hardened derivation and does not allow to associate // corresponding public keys of the original and derived private keys - fn private_hard(private_key: H256, chain_code: H256, index: T) -> (H256, H256) where T: Label { + fn private_hard(private_key: H256, chain_code: H256, index: T) -> (H256, H256) + where + T: Label, + { let mut data: Vec = vec![0u8; 33 + T::len()]; let private: U256 = private_key.into_uint(); @@ -301,10 +305,15 @@ mod derivation { m.try_into().expect("U512 modulo U256 should fit into U256; qed") } - pub fn public(public_key: H512, chain_code: H256, derivation: Derivation) -> Result<(H512, H256), Error> where T: Label { + pub fn public(public_key: H512, chain_code: H256, derivation: Derivation) -> Result<(H512, H256), Error> + where + T: Label, + { let index = match derivation { Derivation::Soft(index) => index, - Derivation::Hard(_) => { return Err(Error::InvalidHardenedUse); } + Derivation::Hard(_) => { + return Err(Error::InvalidHardenedUse); + } }; let mut public_sec_raw = [0u8; 65]; @@ -327,22 +336,21 @@ mod derivation { let new_chain_code = H256::from_slice(&i_512[32..64]); // Generated private key can (extremely rarely) be out of secp256k1 key field - if *CURVE_ORDER <= new_private.into_uint() { return Err(Error::MissingIndex); } - let new_private_sec = SecretKey::from_slice(&SECP256K1, new_private.as_bytes()) - .expect("Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed"); + if *CURVE_ORDER <= new_private.into_uint() { + return Err(Error::MissingIndex); + } + let new_private_sec = SecretKey::from_slice(&SECP256K1, new_private.as_bytes()).expect( + "Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed", + ); let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec) 
.expect("Valid private key produces valid public key"); // Adding two points on the elliptic curves (combining two public keys) - new_public.add_assign(&SECP256K1, &public_sec) - .expect("Addition of two valid points produce valid point"); + new_public.add_assign(&SECP256K1, &public_sec).expect("Addition of two valid points produce valid point"); let serialized = new_public.serialize_vec(&SECP256K1, false); - Ok(( - H512::from_slice(&serialized[1..65]), - new_chain_code, - )) + Ok((H512::from_slice(&serialized[1..65]), new_chain_code)) } fn sha3(slc: &[u8]) -> H256 { @@ -352,15 +360,15 @@ mod derivation { pub fn chain_code(secret: H256) -> H256 { // 10,000 rounds of sha3 let mut running_sha3 = sha3(secret.as_bytes()); - for _ in 0..99999 { running_sha3 = sha3(running_sha3.as_bytes()); } + for _ in 0..99999 { + running_sha3 = sha3(running_sha3.as_bytes()); + } running_sha3 } pub fn point(secret: H256) -> Result { - let sec = SecretKey::from_slice(&SECP256K1, secret.as_bytes()) - .map_err(|_| Error::InvalidPoint)?; - let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec) - .map_err(|_| Error::InvalidPoint)?; + let sec = SecretKey::from_slice(&SECP256K1, secret.as_bytes()).map_err(|_| Error::InvalidPoint)?; + let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec).map_err(|_| Error::InvalidPoint)?; let serialized = public_sec.serialize_vec(&SECP256K1, false); Ok(H512::from_slice(&serialized[1..65])) } @@ -378,22 +386,23 @@ mod derivation { #[cfg(test)] mod tests { - use super::{ExtendedSecret, ExtendedPublic, ExtendedKeyPair}; use super::super::Secret; - use std::str::FromStr; - use ethereum_types::{H128, H256, H512}; use super::{derivation, Derivation}; + use super::{ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; + use ethereum_types::{H128, H256, H512}; + use std::str::FromStr; fn master_chain_basic() -> (H256, H256) { - let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") - .expect("Seed should be valid H128") - .as_bytes() - .to_vec(); + 
let seed = + H128::from_str("000102030405060708090a0b0c0d0e0f").expect("Seed should be valid H128").as_bytes().to_vec(); derivation::seed_pair(&*seed) } - fn test_extended(f: F, test_private: H256) where F: Fn(ExtendedSecret) -> ExtendedSecret { + fn test_extended(f: F, test_private: H256) + where + F: Fn(ExtendedSecret) -> ExtendedSecret, + { let (private_seed, chain_code) = master_chain_basic(); let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code); let derived = f(extended_secret); @@ -417,9 +426,18 @@ mod tests { ); // normal - assert_eq!(**extended_secret.derive(0.into()).as_raw(), H256::from_str("bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6").unwrap()); - assert_eq!(**extended_secret.derive(1.into()).as_raw(), H256::from_str("bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc").unwrap()); - assert_eq!(**extended_secret.derive(2.into()).as_raw(), H256::from_str("86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268").unwrap()); + assert_eq!( + **extended_secret.derive(0.into()).as_raw(), + H256::from_str("bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6").unwrap() + ); + assert_eq!( + **extended_secret.derive(1.into()).as_raw(), + H256::from_str("bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc").unwrap() + ); + assert_eq!( + **extended_secret.derive(2.into()).as_raw(), + H256::from_str("86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268").unwrap() + ); let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); let derived_public = extended_public.derive(0.into()).expect("First derivation of public should succeed"); @@ -441,15 +459,19 @@ mod tests { #[test] fn h256_soft_match() { let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let derivation_secret = 
H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); + let derivation_secret = + H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::zero()); let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret)); - let derived_public0 = extended_public.derive(Derivation::Soft(derivation_secret)).expect("First derivation of public should succeed"); + let derived_public0 = extended_public + .derive(Derivation::Soft(derivation_secret)) + .expect("First derivation of public should succeed"); - let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); + let public_from_secret0 = + ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); assert_eq!(public_from_secret0.public(), derived_public0.public()); } @@ -457,7 +479,8 @@ mod tests { #[test] fn h256_hard() { let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); + let derivation_secret = + H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); assert_eq!( @@ -475,17 +498,16 @@ mod tests { let derived_secret0 = extended_secret.derive(0.into()); let derived_public0 = extended_public.derive(0.into()).expect("First derivation of public should succeed"); - let public_from_secret0 = ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); + let public_from_secret0 = + ExtendedPublic::from_secret(&derived_secret0).expect("Extended 
public should be created"); assert_eq!(public_from_secret0.public(), derived_public0.public()); } #[test] fn test_seeds() { - let seed = H128::from_str("000102030405060708090a0b0c0d0e0f") - .expect("Seed should be valid H128") - .as_bytes() - .to_vec(); + let seed = + H128::from_str("000102030405060708090a0b0c0d0e0f").expect("Seed should be valid H128").as_bytes().to_vec(); // private key from bitcoin test vector // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs @@ -504,7 +526,7 @@ mod tests { test_extended( |secret| secret.derive(2147483648.into()), H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea") - .expect("Private should be decoded ok") + .expect("Private should be decoded ok"), ); } @@ -515,7 +537,7 @@ mod tests { test_extended( |secret| secret.derive(2147483648.into()).derive(1.into()), H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368") - .expect("Private should be decoded ok") + .expect("Private should be decoded ok"), ); } } diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs index 04a9dbadd..a9dc05cab 100644 --- a/parity-crypto/src/publickey/keypair.rs +++ b/parity-crypto/src/publickey/keypair.rs @@ -16,10 +16,10 @@ //! 
Key pair (public + secrect) description -use std::fmt; -use secp256k1::key; -use super::{Secret, Public, Address, SECP256K1, Error}; +use super::{Address, Error, Public, Secret, SECP256K1}; use crate::Keccak256; +use secp256k1::key; +use std::fmt; /// Convert public key into the address pub fn public_to_address(public: &Public) -> Address { @@ -55,10 +55,7 @@ impl KeyPair { let mut public = Public::default(); public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - let keypair = KeyPair { - secret: secret, - public: public, - }; + let keypair = KeyPair { secret: secret, public: public }; Ok(keypair) } @@ -76,10 +73,7 @@ impl KeyPair { let mut public = Public::default(); public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - KeyPair { - secret, - public, - } + KeyPair { secret, public } } /// Returns secret part of the keypair @@ -100,8 +94,8 @@ impl KeyPair { #[cfg(test)] mod tests { - use std::str::FromStr; use super::{KeyPair, Secret}; + use std::str::FromStr; #[test] fn from_secret() { diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs index e816bc6df..2ae91db6e 100644 --- a/parity-crypto/src/publickey/keypair_generator.rs +++ b/parity-crypto/src/publickey/keypair_generator.rs @@ -16,9 +16,9 @@ //! Random key pair generator. Relies on the secp256k1 C-library to generate random data. +use super::{Generator, KeyPair, SECP256K1}; use rand::rngs::OsRng; use std::convert::Infallible; -use super::{Generator, KeyPair, SECP256K1}; /// Randomly generates new keypair, instantiating the RNG each time. 
pub struct Random; @@ -38,8 +38,7 @@ impl Generator for OsRng { type Error = Infallible; fn generate(&mut self) -> Result { - let (sec, publ) = SECP256K1.generate_keypair(self) - .expect("context always created with full capabilities; qed"); + let (sec, publ) = SECP256K1.generate_keypair(self).expect("context always created with full capabilities; qed"); Ok(KeyPair::from_keypair(sec, publ)) } diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index c983f0112..12b07d176 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -17,24 +17,24 @@ //! Submodule of crypto utils for working with public key crypto primitives //! If you are looking for git history please refer to the `ethkey` crate in the `parity-ethereum` repository. +mod ecdsa_signature; +mod extended_keys; mod keypair; mod keypair_generator; -mod ecdsa_signature; mod secret_key; -mod extended_keys; +pub mod ec_math_utils; pub mod ecdh; pub mod ecies; -pub mod ec_math_utils; pub mod error; -pub use self::keypair::{KeyPair, public_to_address}; pub use self::ec_math_utils::public_is_valid; -pub use self::keypair_generator::Random; +pub use self::ecdsa_signature::{recover, sign, verify_address, verify_public, Signature}; pub use self::error::Error; -pub use self::ecdsa_signature::{sign, verify_public, verify_address, recover, Signature}; +pub use self::extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; +pub use self::keypair::{public_to_address, KeyPair}; +pub use self::keypair_generator::Random; pub use self::secret_key::Secret; -pub use self::extended_keys::{ExtendedPublic, ExtendedSecret, ExtendedKeyPair, DerivationError, Derivation}; use ethereum_types::H256; use lazy_static::lazy_static; diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs index ef8590c1d..e721ec66b 100644 --- a/parity-crypto/src/publickey/secret_key.rs +++ 
b/parity-crypto/src/publickey/secret_key.rs @@ -16,15 +16,15 @@ //! Secret key implementation +use super::{Error, SECP256K1}; +use ethereum_types::H256; +use secp256k1::constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE; +use secp256k1::key; +use std::convert::TryFrom; use std::fmt; use std::ops::Deref; use std::str::FromStr; -use std::convert::TryFrom; -use secp256k1::constants::{SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE}; -use secp256k1::key; -use ethereum_types::H256; use zeroize::Zeroize; -use super::{SECP256K1, Error}; /// Represents secret key #[derive(Clone, PartialEq, Eq)] @@ -60,7 +60,7 @@ impl Secret { /// Creates a `Secret` from the given slice, returning `None` if the slice length != 32. pub fn copy_from_slice(key: &[u8]) -> Option { if key.len() != 32 { - return None + return None; } let mut h = H256::zero(); h.as_bytes_mut().copy_from_slice(&key[0..32]); @@ -95,7 +95,7 @@ impl Secret { (true, false) => { *self = other.clone(); Ok(()) - }, + } (false, false) => { let mut key_secret = self.to_secp256k1_secret()?; let other_secret = other.to_secp256k1_secret()?; @@ -103,7 +103,7 @@ impl Secret { *self = key_secret.into(); Ok(()) - }, + } } } @@ -114,7 +114,7 @@ impl Secret { (true, false) => { *self = other.clone(); self.neg() - }, + } (false, false) => { let mut key_secret = self.to_secp256k1_secret()?; let mut other_secret = other.to_secp256k1_secret()?; @@ -123,7 +123,7 @@ impl Secret { *self = key_secret.into(); Ok(()) - }, + } } } @@ -133,14 +133,14 @@ impl Secret { true => { *self = key::MINUS_ONE_KEY.into(); Ok(()) - }, + } false => { let mut key_secret = self.to_secp256k1_secret()?; key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; *self = key_secret.into(); Ok(()) - }, + } } } @@ -151,7 +151,7 @@ impl Secret { (false, true) => { *self = Self::zero(); Ok(()) - }, + } (false, false) => { let mut key_secret = self.to_secp256k1_secret()?; let other_secret = other.to_secp256k1_secret()?; @@ -159,7 +159,7 @@ impl Secret { *self = 
key_secret.into(); Ok(()) - }, + } } } @@ -173,7 +173,7 @@ impl Secret { *self = key_secret.into(); Ok(()) - }, + } } } @@ -200,7 +200,7 @@ impl Secret { for _ in 1..pow { self.mul(&c)?; } - }, + } } Ok(()) @@ -242,7 +242,7 @@ impl TryFrom<&str> for Secret { impl From for Secret { fn from(key: key::SecretKey) -> Self { let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; - a.copy_from_slice(&key[0 .. SECP256K1_SECRET_KEY_SIZE]); + a.copy_from_slice(&key[0..SECP256K1_SECRET_KEY_SIZE]); a.into() } } @@ -257,9 +257,9 @@ impl Deref for Secret { #[cfg(test)] mod tests { - use std::str::FromStr; - use super::super::{Random, Generator}; + use super::super::{Generator, Random}; use super::Secret; + use std::str::FromStr; #[test] fn multiplicating_secret_inversion_with_secret_gives_one() { @@ -267,7 +267,10 @@ mod tests { let mut inversion = secret.clone(); inversion.inv().unwrap(); inversion.mul(&secret).unwrap(); - assert_eq!(inversion, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap()); + assert_eq!( + inversion, + Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap() + ); } #[test] diff --git a/parity-crypto/src/scrypt.rs b/parity-crypto/src/scrypt.rs index 9c8443146..eea1931b3 100644 --- a/parity-crypto/src/scrypt.rs +++ b/parity-crypto/src/scrypt.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use super::{KEY_LENGTH, KEY_LENGTH_AES}; use crate::error::ScryptError; -use super::{KEY_LENGTH_AES, KEY_LENGTH}; use scrypt::{scrypt, ScryptParams}; #[cfg(test)] @@ -28,7 +28,7 @@ pub fn derive_key(pass: &[u8], salt: &[u8], n: u32, p: u32, r: u32) -> Result<(V return Err(ScryptError::InvalidN); } - if p as u64 > ((u32::max_value() as u64 - 1) * 32)/(128 * (r as u64)) { + if p as u64 > ((u32::max_value() as u64 - 1) * 32) / (128 * (r as u64)) { return Err(ScryptError::InvalidP); } @@ -40,23 +40,24 @@ pub fn derive_key(pass: &[u8], salt: &[u8], n: u32, p: u32, r: u32) -> Result<(V Ok((derived_right_bits.to_vec(), derived_left_bits.to_vec())) } - // test is build from previous crypto lib behaviour, values may be incorrect // if previous crypto lib got a bug. #[test] -pub fn test_derive() -> Result<(),Error> { +pub fn test_derive() -> Result<(), Error> { let pass = [109, 121, 112, 97, 115, 115, 10]; - let salt = [109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, - 108, 108, 115, 111, 109, 109, 101, 98, 121, 116, 101, 108, 101, 110, 103, 116, 104, 10]; + let salt = [ + 109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, 108, 108, 115, 111, 109, 109, 101, 98, + 121, 116, 101, 108, 101, 110, 103, 116, 104, 10, + ]; let r1 = [93, 134, 79, 68, 223, 27, 44, 174, 236, 184, 179, 203, 74, 139, 73, 66]; let r2 = [2, 24, 239, 131, 172, 164, 18, 171, 132, 207, 22, 217, 150, 20, 203, 37]; let l1 = [6, 90, 119, 45, 67, 2, 99, 151, 81, 88, 166, 210, 244, 19, 123, 208]; let l2 = [253, 123, 132, 12, 188, 89, 196, 2, 107, 224, 239, 231, 135, 177, 125, 62]; - let (l,r) = derive_key(&pass[..],&salt, 262, 1, 8).unwrap(); + let (l, r) = derive_key(&pass[..], &salt, 262, 1, 8).unwrap(); assert!(l == r1); assert!(r == l1); - let (l,r) = derive_key(&pass[..],&salt, 144, 4, 4).unwrap(); + let (l, r) = derive_key(&pass[..], &salt, 144, 4, 4).unwrap(); assert!(l == r2); assert!(r == l2); Ok(()) diff --git a/parity-path/CHANGELOG.md b/parity-path/CHANGELOG.md 
new file mode 100644 index 000000000..927c9dc9c --- /dev/null +++ b/parity-path/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/parity-path/src/lib.rs b/parity-path/src/lib.rs index ffd4e9a0c..c286678ba 100644 --- a/parity-path/src/lib.rs +++ b/parity-path/src/lib.rs @@ -62,7 +62,9 @@ pub mod ethereum { use std::path::PathBuf; /// Default path for ethereum installation on Mac Os - pub fn default() -> PathBuf { super::config_path("Ethereum") } + pub fn default() -> PathBuf { + super::config_path("Ethereum") + } /// Default path for ethereum installation (testnet) pub fn test() -> PathBuf { @@ -89,14 +91,15 @@ pub mod ethereum { /// Restricts the permissions of given path only to the owner. #[cfg(unix)] -pub fn restrict_permissions_owner(file_path: &Path, write: bool, executable: bool) -> Result<(), String> { - let perms = ::std::os::unix::fs::PermissionsExt::from_mode(0o400 + write as u32 * 0o200 + executable as u32 * 0o100); +pub fn restrict_permissions_owner(file_path: &Path, write: bool, executable: bool) -> Result<(), String> { + let perms = + ::std::os::unix::fs::PermissionsExt::from_mode(0o400 + write as u32 * 0o200 + executable as u32 * 0o100); ::std::fs::set_permissions(file_path, perms).map_err(|e| format!("{:?}", e)) } /// Restricts the permissions of given path only to the owner. #[cfg(not(unix))] -pub fn restrict_permissions_owner(_file_path: &Path, _write: bool, _executable: bool) -> Result<(), String> { +pub fn restrict_permissions_owner(_file_path: &Path, _write: bool, _executable: bool) -> Result<(), String> { //TODO: implement me Ok(()) } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md new file mode 100644 index 000000000..8f69530b7 --- /dev/null +++ b/parity-util-mem/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.2.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index 9193dab9c..d2953ecfb 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -41,87 +41,86 @@ //! - jemalloc: compile error //! - mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) - -use crate::malloc_size::{MallocSizeOfOps, VoidPtrToSizeFn, MallocSizeOf}; #[cfg(feature = "std")] use crate::malloc_size::MallocUnconditionalSizeOf; -#[cfg(feature = "std")] -use std::os::raw::c_void; +use crate::malloc_size::{MallocSizeOf, MallocSizeOfOps, VoidPtrToSizeFn}; #[cfg(not(feature = "std"))] use core::ffi::c_void; +#[cfg(feature = "std")] +use std::os::raw::c_void; mod usable_size { use super::*; -cfg_if::cfg_if! { + cfg_if::cfg_if! { - if #[cfg(any( - target_arch = "wasm32", - feature = "estimate-heapsize", - feature = "weealloc-global", - feature = "dlmalloc-global", - ))] { + if #[cfg(any( + target_arch = "wasm32", + feature = "estimate-heapsize", + feature = "weealloc-global", + feature = "dlmalloc-global", + ))] { - // do not try system allocator + // do not try system allocator - /// Warning this is for compatibility only. - /// This function does panic: `estimate-heapsize` feature needs to be activated - /// to avoid this function call. - pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { - unreachable!("estimate heapsize only") - } + /// Warning this is for compatibility only. + /// This function does panic: `estimate-heapsize` feature needs to be activated + /// to avoid this function call. 
+ pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { + unreachable!("estimate heapsize only") + } - } else if #[cfg(target_os = "windows")] { + } else if #[cfg(target_os = "windows")] { - use winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; + use winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; - /// Get the size of a heap block. - /// Call windows allocator through `winapi` crate - pub unsafe extern "C" fn malloc_usable_size(mut ptr: *const c_void) -> usize { + /// Get the size of a heap block. + /// Call windows allocator through `winapi` crate + pub unsafe extern "C" fn malloc_usable_size(mut ptr: *const c_void) -> usize { - let heap = GetProcessHeap(); + let heap = GetProcessHeap(); - if HeapValidate(heap, 0, ptr) == 0 { - ptr = *(ptr as *const *const c_void).offset(-1); + if HeapValidate(heap, 0, ptr) == 0 { + ptr = *(ptr as *const *const c_void).offset(-1); + } + + HeapSize(heap, 0, ptr) as usize } - HeapSize(heap, 0, ptr) as usize - } + } else if #[cfg(feature = "jemalloc-global")] { - } else if #[cfg(feature = "jemalloc-global")] { + /// Use of jemalloc usable size C function through jemallocator crate call. + pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { + jemallocator::usable_size(ptr) + } - /// Use of jemalloc usable size C function through jemallocator crate call. - pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - jemallocator::usable_size(ptr) - } + } else if #[cfg(feature = "mimalloc-global")] { - } else if #[cfg(feature = "mimalloc-global")] { + /// Use of mimalloc usable size C function through mimalloc_sys crate call. + pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { + // mimalloc doesn't actually mutate the value ptr points to, + // but requires a mut pointer in the API + mimalloc_sys::mi_usable_size(ptr as *mut _) + } - /// Use of mimalloc usable size C function through mimalloc_sys crate call. 
- pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - // mimalloc doesn't actually mutate the value ptr points to, - // but requires a mut pointer in the API - mimalloc_sys::mi_usable_size(ptr as *mut _) - } + } else if #[cfg(target_os = "linux")] { - } else if #[cfg(target_os = "linux")] { + /// Linux call system allocator (currently malloc). + extern "C" { + pub fn malloc_usable_size(ptr: *const c_void) -> usize; + } - /// Linux call system allocator (currently malloc). - extern "C" { - pub fn malloc_usable_size(ptr: *const c_void) -> usize; - } + } else { + // default allocator for non linux or windows system use estimate + pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { + unreachable!("estimate heapsize or feature allocator needed") + } - } else { - // default allocator for non linux or windows system use estimate - pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { - unreachable!("estimate heapsize or feature allocator needed") } } -} - /// No enclosing function defined. #[inline] pub fn new_enclosing_size_fn() -> Option { @@ -131,11 +130,7 @@ cfg_if::cfg_if! { /// Get a new instance of a MallocSizeOfOps pub fn new_malloc_size_ops() -> MallocSizeOfOps { - MallocSizeOfOps::new( - usable_size::malloc_usable_size, - usable_size::new_enclosing_size_fn(), - None, - ) + MallocSizeOfOps::new(usable_size::malloc_usable_size, usable_size::new_enclosing_size_fn(), None) } /// Extension methods for `MallocSizeOf` trait, do not implement @@ -151,7 +146,7 @@ pub trait MallocSizeOfExt: MallocSizeOf { } } -impl MallocSizeOfExt for T { } +impl MallocSizeOfExt for T {} #[cfg(feature = "std")] impl MallocSizeOf for std::sync::Arc { diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs index b6fd44d97..ca36ce193 100644 --- a/parity-util-mem/src/impls.rs +++ b/parity-util-mem/src/impls.rs @@ -19,27 +19,13 @@ //! - elastic_array arrays //! 
- parking_lot mutex structures -use ethereum_types::{ - U64, U128, U256, U512, H32, H64, - H128, H160, H256, H264, H512, H520, - Bloom -}; +use super::{MallocSizeOf, MallocSizeOfOps}; use elastic_array::{ - ElasticArray2, - ElasticArray4, - ElasticArray8, - ElasticArray16, - ElasticArray32, - ElasticArray36, - ElasticArray64, - ElasticArray128, - ElasticArray256, - ElasticArray512, - ElasticArray1024, - ElasticArray2048, + ElasticArray1024, ElasticArray128, ElasticArray16, ElasticArray2, ElasticArray2048, ElasticArray256, + ElasticArray32, ElasticArray36, ElasticArray4, ElasticArray512, ElasticArray64, ElasticArray8, }; +use ethereum_types::{Bloom, H128, H160, H256, H264, H32, H512, H520, H64, U128, U256, U512, U64}; use parking_lot::{Mutex, RwLock}; -use super::{MallocSizeOf, MallocSizeOfOps}; #[cfg(not(feature = "std"))] use core as std; @@ -48,21 +34,19 @@ use core as std; malloc_size_of_is_0!(std::time::Instant); malloc_size_of_is_0!(std::time::Duration); -malloc_size_of_is_0!( - U64, U128, U256, U512, H32, H64, - H128, H160, H256, H264, H512, H520, - Bloom -); +malloc_size_of_is_0!(U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom); macro_rules! 
impl_elastic_array { - ($name: ident, $dummy: ident, $size: expr) => ( + ($name: ident, $dummy: ident, $size: expr) => { impl MallocSizeOf for $name - where T: MallocSizeOf { + where + T: MallocSizeOf, + { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { self[..].size_of(ops) } } - ) + }; } impl_elastic_array!(ElasticArray2, ElasticArray2Dummy, 2); @@ -78,7 +62,6 @@ impl_elastic_array!(ElasticArray512, ElasticArray512Dummy, 512); impl_elastic_array!(ElasticArray1024, ElasticArray1024Dummy, 1024); impl_elastic_array!(ElasticArray2048, ElasticArray2048Dummy, 2048); - impl MallocSizeOf for Mutex { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { (*self.lock()).size_of(ops) diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index b55e5dc9e..43fbc4ab6 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -25,7 +25,6 @@ extern crate alloc; use malloc_size_of_derive as malloc_size_derive; - cfg_if::cfg_if! { if #[cfg(all( feature = "jemalloc-global", @@ -57,13 +56,7 @@ cfg_if::cfg_if! { pub mod allocators; -#[cfg(any( - all( - target_os = "macos", - not(feature = "jemalloc-global"), - ), - feature = "estimate-heapsize" -))] +#[cfg(any(all(target_os = "macos", not(feature = "jemalloc-global"),), feature = "estimate-heapsize"))] pub mod sizeof; /// This is a copy of patched crate `malloc_size_of` as a module. @@ -71,23 +64,21 @@ pub mod sizeof; /// if at some point the trait become standard enough we could use the right way of doing it /// by implementing it in our type traits crates. At this time moving this trait to the primitive /// types level would impact too much of the dependencies to be easily manageable. 
-#[macro_use] mod malloc_size; +#[macro_use] +mod malloc_size; #[cfg(feature = "ethereum-impls")] pub mod impls; -pub use malloc_size_derive::*; -pub use malloc_size::{ - MallocSizeOfOps, - MallocSizeOf, -}; pub use allocators::MallocSizeOfExt; +pub use malloc_size::{MallocSizeOf, MallocSizeOfOps}; +pub use malloc_size_derive::*; #[cfg(feature = "std")] #[cfg(test)] mod test { - use std::sync::Arc; use super::MallocSizeOfExt; + use std::sync::Arc; #[test] fn test_arc() { diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 8c2e06deb..a3018af1f 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -43,10 +43,8 @@ //! measured as well as the thing it points to. E.g. //! ` as MallocSizeOf>::size_of(field, ops)`. - // This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT. - #[cfg(not(feature = "std"))] use alloc::vec::Vec; #[cfg(feature = "std")] @@ -55,7 +53,7 @@ mod rstd { } #[cfg(not(feature = "std"))] mod rstd { - pub use core::*; + pub use core::*; pub mod collections { pub use alloc::collections::*; pub use vec_deque::VecDeque; @@ -65,18 +63,18 @@ mod rstd { #[cfg(feature = "std")] use std::sync::Arc; -#[cfg(feature = "std")] -use std::hash::BuildHasher; +#[cfg(not(feature = "std"))] +pub use alloc::boxed::Box; +#[cfg(not(feature = "std"))] +use core::ffi::c_void; use rstd::hash::Hash; use rstd::mem::size_of; use rstd::ops::Range; use rstd::ops::{Deref, DerefMut}; #[cfg(feature = "std")] +use std::hash::BuildHasher; +#[cfg(feature = "std")] use std::os::raw::c_void; -#[cfg(not(feature = "std"))] -use core::ffi::c_void; -#[cfg(not(feature = "std"))] -pub use alloc::boxed::Box; /// A C function that takes a pointer to a heap allocation and returns its size. 
pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; @@ -86,428 +84,412 @@ pub type VoidPtrToBoolFnMut = dyn FnMut(*const c_void) -> bool; /// Operations used when measuring heap usage of data structures. pub struct MallocSizeOfOps { - /// A function that returns the size of a heap allocation. - size_of_op: VoidPtrToSizeFn, + /// A function that returns the size of a heap allocation. + size_of_op: VoidPtrToSizeFn, - /// Like `size_of_op`, but can take an interior pointer. Optional because - /// not all allocators support this operation. If it's not provided, some - /// memory measurements will actually be computed estimates rather than - /// real and accurate measurements. - enclosing_size_of_op: Option, + /// Like `size_of_op`, but can take an interior pointer. Optional because + /// not all allocators support this operation. If it's not provided, some + /// memory measurements will actually be computed estimates rather than + /// real and accurate measurements. + enclosing_size_of_op: Option, - /// Check if a pointer has been seen before, and remember it for next time. - /// Useful when measuring `Rc`s and `Arc`s. Optional, because many places - /// don't need it. - have_seen_ptr_op: Option>, + /// Check if a pointer has been seen before, and remember it for next time. + /// Useful when measuring `Rc`s and `Arc`s. Optional, because many places + /// don't need it. + have_seen_ptr_op: Option>, } impl MallocSizeOfOps { - pub fn new( - size_of: VoidPtrToSizeFn, - malloc_enclosing_size_of: Option, - have_seen_ptr: Option>, - ) -> Self { - MallocSizeOfOps { - size_of_op: size_of, - enclosing_size_of_op: malloc_enclosing_size_of, - have_seen_ptr_op: have_seen_ptr, - } - } - - /// Check if an allocation is empty. This relies on knowledge of how Rust - /// handles empty allocations, which may change in the future. 
- fn is_empty(ptr: *const T) -> bool { - // The correct condition is this: - // `ptr as usize <= ::std::mem::align_of::()` - // But we can't call align_of() on a ?Sized T. So we approximate it - // with the following. 256 is large enough that it should always be - // larger than the required alignment, but small enough that it is - // always in the first page of memory and therefore not a legitimate - // address. - return ptr as *const usize as usize <= 256; - } - - /// Call `size_of_op` on `ptr`, first checking that the allocation isn't - /// empty, because some types (such as `Vec`) utilize empty allocations. - pub unsafe fn malloc_size_of(&self, ptr: *const T) -> usize { - if MallocSizeOfOps::is_empty(ptr) { - 0 - } else { - (self.size_of_op)(ptr as *const c_void) - } - } - - /// Is an `enclosing_size_of_op` available? - pub fn has_malloc_enclosing_size_of(&self) -> bool { - self.enclosing_size_of_op.is_some() - } - - /// Call `enclosing_size_of_op`, which must be available, on `ptr`, which - /// must not be empty. - pub unsafe fn malloc_enclosing_size_of(&self, ptr: *const T) -> usize { - assert!(!MallocSizeOfOps::is_empty(ptr)); - (self.enclosing_size_of_op.unwrap())(ptr as *const c_void) - } - - /// Call `have_seen_ptr_op` on `ptr`. - pub fn have_seen_ptr(&mut self, ptr: *const T) -> bool { - let have_seen_ptr_op = self - .have_seen_ptr_op - .as_mut() - .expect("missing have_seen_ptr_op"); - have_seen_ptr_op(ptr as *const c_void) - } + pub fn new( + size_of: VoidPtrToSizeFn, + malloc_enclosing_size_of: Option, + have_seen_ptr: Option>, + ) -> Self { + MallocSizeOfOps { + size_of_op: size_of, + enclosing_size_of_op: malloc_enclosing_size_of, + have_seen_ptr_op: have_seen_ptr, + } + } + + /// Check if an allocation is empty. This relies on knowledge of how Rust + /// handles empty allocations, which may change in the future. 
+ fn is_empty(ptr: *const T) -> bool { + // The correct condition is this: + // `ptr as usize <= ::std::mem::align_of::()` + // But we can't call align_of() on a ?Sized T. So we approximate it + // with the following. 256 is large enough that it should always be + // larger than the required alignment, but small enough that it is + // always in the first page of memory and therefore not a legitimate + // address. + return ptr as *const usize as usize <= 256; + } + + /// Call `size_of_op` on `ptr`, first checking that the allocation isn't + /// empty, because some types (such as `Vec`) utilize empty allocations. + pub unsafe fn malloc_size_of(&self, ptr: *const T) -> usize { + if MallocSizeOfOps::is_empty(ptr) { + 0 + } else { + (self.size_of_op)(ptr as *const c_void) + } + } + + /// Is an `enclosing_size_of_op` available? + pub fn has_malloc_enclosing_size_of(&self) -> bool { + self.enclosing_size_of_op.is_some() + } + + /// Call `enclosing_size_of_op`, which must be available, on `ptr`, which + /// must not be empty. + pub unsafe fn malloc_enclosing_size_of(&self, ptr: *const T) -> usize { + assert!(!MallocSizeOfOps::is_empty(ptr)); + (self.enclosing_size_of_op.unwrap())(ptr as *const c_void) + } + + /// Call `have_seen_ptr_op` on `ptr`. + pub fn have_seen_ptr(&mut self, ptr: *const T) -> bool { + let have_seen_ptr_op = self.have_seen_ptr_op.as_mut().expect("missing have_seen_ptr_op"); + have_seen_ptr_op(ptr as *const c_void) + } } /// Trait for measuring the "deep" heap usage of a data structure. This is the /// most commonly-used of the traits. pub trait MallocSizeOf { - /// Measure the heap usage of all descendant heap-allocated structures, but - /// not the space taken up by the value itself. - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + /// Measure the heap usage of all descendant heap-allocated structures, but + /// not the space taken up by the value itself. 
+ fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } /// Trait for measuring the "shallow" heap usage of a container. pub trait MallocShallowSizeOf { - /// Measure the heap usage of immediate heap-allocated descendant - /// structures, but not the space taken up by the value itself. Anything - /// beyond the immediate descendants must be measured separately, using - /// iteration. - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + /// Measure the heap usage of immediate heap-allocated descendant + /// structures, but not the space taken up by the value itself. Anything + /// beyond the immediate descendants must be measured separately, using + /// iteration. + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } /// Like `MallocSizeOf`, but with a different name so it cannot be used /// accidentally with derive(MallocSizeOf). For use with types like `Rc` and /// `Arc` when appropriate (e.g. when measuring a "primary" reference). pub trait MallocUnconditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself. - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + /// Measure the heap usage of all heap-allocated descendant structures, but + /// not the space taken up by the value itself. + fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } /// `MallocUnconditionalSizeOf` combined with `MallocShallowSizeOf`. pub trait MallocUnconditionalShallowSizeOf { - /// `unconditional_size_of` combined with `shallow_size_of`. - fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + /// `unconditional_size_of` combined with `shallow_size_of`. + fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } /// Like `MallocSizeOf`, but only measures if the value hasn't already been /// measured. For use with types like `Rc` and `Arc` when appropriate (e.g. 
/// when there is no "primary" reference). pub trait MallocConditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself, and only if that heap usage - /// hasn't already been measured. - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + /// Measure the heap usage of all heap-allocated descendant structures, but + /// not the space taken up by the value itself, and only if that heap usage + /// hasn't already been measured. + fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } /// `MallocConditionalSizeOf` combined with `MallocShallowSizeOf`. pub trait MallocConditionalShallowSizeOf { - /// `conditional_size_of` combined with `shallow_size_of`. - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + /// `conditional_size_of` combined with `shallow_size_of`. + fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } -#[cfg(not(any( - all( - target_os = "macos", - not(feature = "jemalloc-global"), - ), - feature = "estimate-heapsize" -)))] +#[cfg(not(any(all(target_os = "macos", not(feature = "jemalloc-global"),), feature = "estimate-heapsize")))] pub mod inner_allocator_use { -use super::*; + use super::*; -#[cfg(not(feature = "std"))] -use alloc::string::String; + #[cfg(not(feature = "std"))] + use alloc::string::String; -impl MallocShallowSizeOf for Box { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(&**self) } - } -} - -impl MallocShallowSizeOf for Vec { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(self.as_ptr()) } - } -} + impl MallocShallowSizeOf for Box { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + unsafe { ops.malloc_size_of(&**self) } + } + } -// currently this seems only fine with jemalloc -#[cfg(feature = "std")] -#[cfg(all(feature = "jemalloc-global", not(target_os = 
"windows")))] -impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(arc_ptr(self)) } - } -} + impl MallocShallowSizeOf for Vec { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + unsafe { ops.malloc_size_of(self.as_ptr()) } + } + } -#[cfg(feature = "std")] -#[cfg(not(all(feature = "jemalloc-global", not(target_os = "windows"))))] -impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - size_of::() - } -} + // currently this seems only fine with jemalloc + #[cfg(feature = "std")] + #[cfg(all(feature = "jemalloc-global", not(target_os = "windows")))] + impl MallocUnconditionalShallowSizeOf for Arc { + fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + unsafe { ops.malloc_size_of(arc_ptr(self)) } + } + } -impl MallocSizeOf for String { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(self.as_ptr()) } - } -} + #[cfg(feature = "std")] + #[cfg(not(all(feature = "jemalloc-global", not(target_os = "windows"))))] + impl MallocUnconditionalShallowSizeOf for Arc { + fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + size_of::() + } + } + impl MallocSizeOf for String { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + unsafe { ops.malloc_size_of(self.as_ptr()) } + } + } } impl<'a, T: ?Sized> MallocSizeOf for &'a T { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - // Zero makes sense for a non-owning reference. - 0 - } + fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + // Zero makes sense for a non-owning reference. 
+ 0 + } } impl MallocSizeOf for Box { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.shallow_size_of(ops) + (**self).size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.shallow_size_of(ops) + (**self).size_of(ops) + } } impl MallocSizeOf for () { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 0 - } + fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + 0 + } } impl MallocSizeOf for (T1, T2) where - T1: MallocSizeOf, - T2: MallocSizeOf, + T1: MallocSizeOf, + T2: MallocSizeOf, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + self.1.size_of(ops) + } } impl MallocSizeOf for (T1, T2, T3) where - T1: MallocSizeOf, - T2: MallocSizeOf, - T3: MallocSizeOf, + T1: MallocSizeOf, + T2: MallocSizeOf, + T3: MallocSizeOf, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) + } } impl MallocSizeOf for (T1, T2, T3, T4) where - T1: MallocSizeOf, - T2: MallocSizeOf, - T3: MallocSizeOf, - T4: MallocSizeOf, + T1: MallocSizeOf, + T2: MallocSizeOf, + T3: MallocSizeOf, + T4: MallocSizeOf, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) + self.3.size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) + self.3.size_of(ops) + } } impl MallocSizeOf for Option { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if let Some(val) = self.as_ref() { - val.size_of(ops) - } else { - 0 - } - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if let Some(val) = self.as_ref() { + val.size_of(ops) + } else { + 0 + } + } 
} impl MallocSizeOf for Result { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - match *self { - Ok(ref x) => x.size_of(ops), - Err(ref e) => e.size_of(ops), - } - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + match *self { + Ok(ref x) => x.size_of(ops), + Err(ref e) => e.size_of(ops), + } + } } impl MallocSizeOf for rstd::cell::Cell { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.get().size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.get().size_of(ops) + } } impl MallocSizeOf for rstd::cell::RefCell { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.borrow().size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.borrow().size_of(ops) + } } #[cfg(feature = "std")] impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B> where - B::Owned: MallocSizeOf, + B::Owned: MallocSizeOf, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - match *self { - std::borrow::Cow::Borrowed(_) => 0, - std::borrow::Cow::Owned(ref b) => b.size_of(ops), - } - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + match *self { + std::borrow::Cow::Borrowed(_) => 0, + std::borrow::Cow::Owned(ref b) => b.size_of(ops), + } + } } impl MallocSizeOf for [T] { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = 0; - for elem in self.iter() { - n += elem.size_of(ops); - } - n - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = 0; + for elem in self.iter() { + n += elem.size_of(ops); + } + n + } } impl MallocSizeOf for Vec { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for elem in self.iter() { - n += elem.size_of(ops); - } - n - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for elem in self.iter() { + n += elem.size_of(ops); + } + n + } } impl MallocShallowSizeOf for 
rstd::collections::VecDeque { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - if let Some(front) = self.front() { - // The front element is an interior pointer. - unsafe { ops.malloc_enclosing_size_of(&*front) } - } else { - // This assumes that no memory is allocated when the VecDeque is empty. - 0 - } - } else { - // An estimate. - self.capacity() * size_of::() - } - } + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + if let Some(front) = self.front() { + // The front element is an interior pointer. + unsafe { ops.malloc_enclosing_size_of(&*front) } + } else { + // This assumes that no memory is allocated when the VecDeque is empty. + 0 + } + } else { + // An estimate. + self.capacity() * size_of::() + } + } } impl MallocSizeOf for rstd::collections::VecDeque { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for elem in self.iter() { - n += elem.size_of(ops); - } - n - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for elem in self.iter() { + n += elem.size_of(ops); + } + n + } } #[cfg(feature = "std")] impl MallocShallowSizeOf for std::collections::HashSet where - T: Eq + Hash, - S: BuildHasher, + T: Eq + Hash, + S: BuildHasher, { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - // The first value from the iterator gives us an interior pointer. - // `ops.malloc_enclosing_size_of()` then gives us the storage size. - // This assumes that the `HashSet`'s contents (values and hashes) - // are all stored in a single contiguous heap allocation. - self.iter() - .next() - .map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) - } else { - // An estimate. 
- self.capacity() * (size_of::() + size_of::()) - } - } + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + // The first value from the iterator gives us an interior pointer. + // `ops.malloc_enclosing_size_of()` then gives us the storage size. + // This assumes that the `HashSet`'s contents (values and hashes) + // are all stored in a single contiguous heap allocation. + self.iter().next().map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) + } else { + // An estimate. + self.capacity() * (size_of::() + size_of::()) + } + } } #[cfg(feature = "std")] impl MallocSizeOf for std::collections::HashSet where - T: Eq + Hash + MallocSizeOf, - S: BuildHasher, + T: Eq + Hash + MallocSizeOf, + S: BuildHasher, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for t in self.iter() { - n += t.size_of(ops); - } - n - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for t in self.iter() { + n += t.size_of(ops); + } + n + } } #[cfg(feature = "std")] impl MallocShallowSizeOf for std::collections::HashMap where - K: Eq + Hash, - S: BuildHasher, + K: Eq + Hash, + S: BuildHasher, { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - // See the implementation for std::collections::HashSet for details. - if ops.has_malloc_enclosing_size_of() { - self.values() - .next() - .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.capacity() * (size_of::() + size_of::() + size_of::()) - } - } + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + // See the implementation for std::collections::HashSet for details. 
+ if ops.has_malloc_enclosing_size_of() { + self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) + } else { + self.capacity() * (size_of::() + size_of::() + size_of::()) + } + } } #[cfg(feature = "std")] impl MallocSizeOf for std::collections::HashMap where - K: Eq + Hash + MallocSizeOf, - V: MallocSizeOf, - S: BuildHasher, + K: Eq + Hash + MallocSizeOf, + V: MallocSizeOf, + S: BuildHasher, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); - } - n - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for (k, v) in self.iter() { + n += k.size_of(ops); + n += v.size_of(ops); + } + n + } } impl MallocShallowSizeOf for rstd::collections::BTreeMap where - K: Eq + Hash, + K: Eq + Hash, { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - self.values() - .next() - .map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.len() * (size_of::() + size_of::() + size_of::()) - } - } + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) + } else { + self.len() * (size_of::() + size_of::() + size_of::()) + } + } } impl MallocSizeOf for rstd::collections::BTreeMap where - K: Eq + Hash + MallocSizeOf, - V: MallocSizeOf, + K: Eq + Hash + MallocSizeOf, + V: MallocSizeOf, { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); - } - n - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for (k, v) in self.iter() { + n += k.size_of(ops); + n += v.size_of(ops); + } + n + } } // PhantomData is always 0. 
impl MallocSizeOf for rstd::marker::PhantomData { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 0 - } + fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { + 0 + } } // XXX: we don't want MallocSizeOf to be defined for Rc and Arc. If negative @@ -518,37 +500,37 @@ impl MallocSizeOf for rstd::marker::PhantomData { //impl !MallocShallowSizeOf for Arc { } #[cfg(feature = "std")] -fn arc_ptr(s: &Arc) -> * const T { - &(**s) as *const T +fn arc_ptr(s: &Arc) -> *const T { + &(**s) as *const T } #[cfg(feature = "std")] impl MallocUnconditionalSizeOf for Arc { - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) - } + fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) + } } #[cfg(feature = "std")] impl MallocConditionalShallowSizeOf for Arc { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_shallow_size_of(ops) - } - } + fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.have_seen_ptr(arc_ptr(self)) { + 0 + } else { + self.unconditional_shallow_size_of(ops) + } + } } #[cfg(feature = "std")] impl MallocConditionalSizeOf for Arc { - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_size_of(ops) - } - } + fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.have_seen_ptr(arc_ptr(self)) { + 0 + } else { + self.unconditional_size_of(ops) + } + } } /// If a mutex is stored directly as a member of a data type that is being measured, @@ -559,9 +541,9 @@ impl MallocConditionalSizeOf for Arc { /// contents. 
#[cfg(feature = "std")] impl MallocSizeOf for std::sync::Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock().unwrap()).size_of(ops) - } + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + (*self.lock().unwrap()).size_of(ops) + } } #[macro_export] @@ -607,15 +589,15 @@ malloc_size_of_is_0!(Range, Range); pub struct Measurable(pub T); impl Deref for Measurable { - type Target = T; + type Target = T; - fn deref(&self) -> &T { - &self.0 - } + fn deref(&self) -> &T { + &self.0 + } } impl DerefMut for Measurable { - fn deref_mut(&mut self) -> &mut T { - &mut self.0 - } + fn deref_mut(&mut self) -> &mut T { + &mut self.0 + } } diff --git a/parity-util-mem/src/sizeof.rs b/parity-util-mem/src/sizeof.rs index fb917b1b6..ef63e1000 100644 --- a/parity-util-mem/src/sizeof.rs +++ b/parity-util-mem/src/sizeof.rs @@ -17,23 +17,17 @@ //! Estimation for heapsize calculation. Usable to replace call to allocator method (for some //! allocators or simply because we just need a deterministic cunsumption measurement). - -use crate::malloc_size::{ - MallocSizeOf, - MallocShallowSizeOf, - MallocUnconditionalShallowSizeOf, - MallocSizeOfOps -}; +use crate::malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps, MallocUnconditionalShallowSizeOf}; #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(not(feature = "std"))] -use alloc::vec::Vec; -#[cfg(not(feature = "std"))] use alloc::string::String; #[cfg(not(feature = "std"))] -use core::mem::{size_of, size_of_val}; -#[cfg(not(feature = "std"))] use alloc::sync::Arc; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +#[cfg(not(feature = "std"))] +use core::mem::{size_of, size_of_val}; #[cfg(feature = "std")] use std::mem::{size_of, size_of_val}; diff --git a/plain_hasher/CHANGELOG.md b/plain_hasher/CHANGELOG.md new file mode 100644 index 000000000..a4bd19586 --- /dev/null +++ b/plain_hasher/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.2.2] - 2019-10-24 +- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/213) +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/plain_hasher/benches/bench.rs b/plain_hasher/benches/bench.rs index d5701ef87..4ba53bb1a 100644 --- a/plain_hasher/benches/bench.rs +++ b/plain_hasher/benches/bench.rs @@ -21,20 +21,24 @@ use criterion::{criterion_group, criterion_main, Criterion}; use plain_hasher::PlainHasher; fn bench_write_hasher(c: &mut Criterion) { - c.bench_function("write_plain_hasher", |b| b.iter(|| { - (0..100u8).fold(PlainHasher::default(), |mut old, new| { - let bb = [new; 32]; - old.write(&bb); - old - }); - })); - c.bench_function("write_default_hasher", |b| b.iter(|| { - (0..100u8).fold(DefaultHasher::default(), |mut old, new| { - let bb = [new; 32]; - old.write(&bb); - old - }); - })); + c.bench_function("write_plain_hasher", |b| { + b.iter(|| { + (0..100u8).fold(PlainHasher::default(), |mut old, new| { + let bb = [new; 32]; + old.write(&bb); + old + }); + }) + }); + c.bench_function("write_default_hasher", |b| { + b.iter(|| { + (0..100u8).fold(DefaultHasher::default(), |mut old, new| { + let bb = [new; 32]; + old.write(&bb); + old + }); + }) + }); } criterion_group!(benches, bench_write_hasher); diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md new file mode 100644 index 000000000..ed6699fde --- /dev/null +++ b/primitive-types/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.6.1] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md new file mode 100644 index 000000000..927c9dc9c --- /dev/null +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/primitive-types/impls/codec/src/lib.rs b/primitive-types/impls/codec/src/lib.rs index 961081793..ea478de98 100644 --- a/primitive-types/impls/codec/src/lib.rs +++ b/primitive-types/impls/codec/src/lib.rs @@ -28,14 +28,11 @@ macro_rules! impl_uint_codec { impl $crate::codec::EncodeLike for $name {} impl $crate::codec::Decode for $name { - fn decode(input: &mut I) - -> core::result::Result - { - <[u8; $len * 8] as $crate::codec::Decode>::decode(input) - .map(|b| $name::from_little_endian(&b)) + fn decode(input: &mut I) -> core::result::Result { + <[u8; $len * 8] as $crate::codec::Decode>::decode(input).map(|b| $name::from_little_endian(&b)) } } - } + }; } /// Add Parity Codec serialization support to a fixed-sized hash type created by `construct_fixed_hash!`. @@ -51,11 +48,9 @@ macro_rules! impl_fixed_hash_codec { impl $crate::codec::EncodeLike for $name {} impl $crate::codec::Decode for $name { - fn decode(input: &mut I) - -> core::result::Result - { + fn decode(input: &mut I) -> core::result::Result { <[u8; $len] as $crate::codec::Decode>::decode(input).map($name) } } - } + }; } diff --git a/primitive-types/impls/rlp/CHANGELOG.md b/primitive-types/impls/rlp/CHANGELOG.md new file mode 100644 index 000000000..927c9dc9c --- /dev/null +++ b/primitive-types/impls/rlp/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/primitive-types/impls/rlp/src/lib.rs b/primitive-types/impls/rlp/src/lib.rs index dffbfe96b..da82d0d79 100644 --- a/primitive-types/impls/rlp/src/lib.rs +++ b/primitive-types/impls/rlp/src/lib.rs @@ -42,7 +42,7 @@ macro_rules! impl_uint_rlp { }) } } - } + }; } /// Add RLP serialization support to a fixed-sized hash type created by `construct_fixed_hash!`. @@ -68,5 +68,5 @@ macro_rules! impl_fixed_hash_rlp { }) } } - } + }; } diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md new file mode 100644 index 000000000..a63cfb2f1 --- /dev/null +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.2.3] - 2019-10-29 +### Fixed +- Fixed a bug in empty slice serialization (https://github.com/paritytech/parity-common/pull/253) diff --git a/primitive-types/impls/serde/benches/impl_serde.rs b/primitive-types/impls/serde/benches/impl_serde.rs index 5e8935710..d19a97fda 100644 --- a/primitive-types/impls/serde/benches/impl_serde.rs +++ b/primitive-types/impls/serde/benches/impl_serde.rs @@ -12,11 +12,11 @@ //! cargo bench //! 
``` -use criterion::{black_box, criterion_main, criterion_group, Criterion, ParameterizedBenchmark}; +use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark}; use serde_derive::{Deserialize, Serialize}; // TODO(niklasad1): use `uint::construct_uint` when a new version of `uint` is released -use uint::*; use impl_serde::impl_uint_serde; +use uint::*; mod input; @@ -29,13 +29,7 @@ impl_uint_serde!(U256, 4); #[derive(Debug, Deserialize, Serialize)] struct Bytes(#[serde(with = "impl_serde::serialize")] Vec); -criterion_group!( - impl_serde, - u256_to_hex, - hex_to_u256, - bytes_to_hex, - hex_to_bytes, -); +criterion_group!(impl_serde, u256_to_hex, hex_to_u256, bytes_to_hex, hex_to_bytes,); criterion_main!(impl_serde); fn u256_to_hex(c: &mut Criterion) { @@ -43,11 +37,7 @@ fn u256_to_hex(c: &mut Criterion) { "u256_to_hex", ParameterizedBenchmark::new( "", - |b, x| { - b.iter(|| { - black_box(serde_json::to_string(&x)) - }) - }, + |b, x| b.iter(|| black_box(serde_json::to_string(&x))), vec![ U256::from(0), U256::from(100), @@ -71,15 +61,7 @@ fn hex_to_u256(c: &mut Criterion) { c.bench( "hex_to_u256", - ParameterizedBenchmark::new( - "", - |b, x| { - b.iter(|| { - black_box(serde_json::from_str::(&x)) - }) - }, - parameters - ) + ParameterizedBenchmark::new("", |b, x| b.iter(|| black_box(serde_json::from_str::(&x))), parameters), ); } @@ -95,15 +77,7 @@ fn bytes_to_hex(c: &mut Criterion) { c.bench( "bytes to hex", - ParameterizedBenchmark::new( - "", - |b, x| { - b.iter(|| { - black_box(serde_json::to_string(&x)) - }) - }, - parameters - ) + ParameterizedBenchmark::new("", |b, x| b.iter(|| black_box(serde_json::to_string(&x))), parameters), ); } @@ -119,14 +93,6 @@ fn hex_to_bytes(c: &mut Criterion) { c.bench( "hex to bytes", - ParameterizedBenchmark::new( - "", - |b, x| { - b.iter(|| { - black_box(serde_json::from_str::(&x)) - }) - }, - parameters - ) + ParameterizedBenchmark::new("", |b, x| b.iter(|| 
black_box(serde_json::from_str::(&x))), parameters), ); } diff --git a/primitive-types/impls/serde/benches/input.rs b/primitive-types/impls/serde/benches/input.rs index 060b430d8..00d5efdc5 100644 --- a/primitive-types/impls/serde/benches/input.rs +++ b/primitive-types/impls/serde/benches/input.rs @@ -14,5 +14,4 @@ pub const HEX_4096_CHARS: &str = "\"0x40966ce6355f1dbbfef6ea6f1e131dbc6425bccdf6 pub const HEX_16384_CHARS: &str = "\"0x163845494c2b0c18961062bab4aaca8139fc0d25421880bb44321d6632cf7f4c4504f62524f7a86ae4475cf99b5b4cda8737a27f14ddae73a8c7815a60603e6f732dad7b42a94ef4cc9674b7c73c93b6baace28b61879755ad8261917798e9b158219efacd1c25689e1d1c085cf314808e5c3c1e92ea386a1e0c8a5fb1ae88e2156052ecde6aad91a78cba4cda9632fa7c41d64845d48a51a1ec922567e530184cd9fab6d8c1b19612b350ebacd2daf44dc32a969496f0e0cfea47362fbc387bc8cfa7f8a9efd797bed8d1a317ca217226aaecb0f58cc29cc4c2f4d3c91f6367b2db2c4ffc44243486bd6e5ceae007bfd6d1bc44f784af00df58f88b1a999911c256038e04a56360655b2ba92841fac013ef8cf902720c23991292b6e540f95a7ac9b7d0dfd92dd7ab7e757a80cb170acf0466812f141bad1b8047b10ed98f51fe1a28851b8019a73721c0b29815b6642587a77d2711fb93238a6005ca3613a049546db4b6626ffcca2c8352b02a4bcd514e76bd2c9e4f0a6ac557deca69ccfa41caf16d276e06b272d6bf2c680828dead23c3b6d3111201f2ca98afd36ae03da4b8a620e48cb8ae05ecf8d37d21840052ff426543a141b7f25e27a7cf94240945b0fa69e9e4e566e2255f677789cb26fc129b7021221af3588711cf9402a750d89e4f9288eacadc2bb64a6782b9c3cefd3faa57c3796ab150a9126d9959853bf93039dbcf8a0965dec9e295d5fa7e7f8d63f115e14d9e9eb531eacbcdd392cfa8c6eb499f55a6660b1bd9c2d40ebcf31581a720b0b2576ef202f739ac809a7ba7c7fe87817b6e76f02f8613111621d615aa57c9fb914cc8587a6e7f1e91a82381174e8ab011d50e86b826cd945c41cfe3608b5785c6c496b2f6e4ec9f25059bda78a3af9899426076b07039e5072b464b94ec5b5544ab15151ee583a3645819caf2df574d81079cd7ada1baf296db81e5c046f4385aad4554903cd107c0f78759c5dfd327a022a3a986de84a96e03858c3cbd476e4fe25645c437dd4986c31cb7fe6fe82d80f83887de253a6601160ccf4ef82d5e556c5fccb983bd8da0556983520b866768505e73a5eb9ffef
ea262dfa6218ddb143eff7f9a9b2715c526d3bcc4f1d1dfdfcea8ab68a005d0192ccdf68d2da67cbb156ada92df641a5d9f18af4b76485aa84565e9deb41c6b29ddef705c53801dcf6540dd2c2ceaa7da6bade03ce40e1e44fafe7edab9fe2a89ef77c25a4efcfc456f472a6502020a196beb2c17c25977ef7c328a19c3636ef52ee8afa78f5d31002bda28f33156a3520b4e40a6740fca6bcc003b2fddfc33e7bff9c443350d4e2e571a0f7b465142c905c1d98c5ecdf887b494f1956a1f83d5952b4618748852937b900edaae2e9b8445eec1346a7b95b5ca36bafb9c45bdb7a895f1bf4852ec217886b7d67bfdf2711f73b56397c3b5f7138fbc91434d3ec3a1d8ca584d32691b82fc0e59f8ccc0ea9aef5ab92eb2f93fec2122142ba912832bb22a1fa594fe4b14de7f3a429bb6325fa6db1d1744f878fa2ccc630c02bf4a4a6c8a8fa42e22771dc2cd252f14c2cc6f0fc3a6fdd8c7ffd62adc26efee754b0fdfff4678eaf6dc2d8e59778b9cf1b691bedc4395b0680353be17acdc18d33101a8a2e6bcfe8878151b4480a677d47a536b41e974362d5d7e9dde2e6122b22d6f056b4e1aadbc6c776ce7c2d6248c66a3cecd2587e9595548661b86dd6bf356b2d28d1cee23813580b25cc863ef8da98cb8c59b120fb6c371474cde9bf971ed5366d51419da90edfec0b56127cb7d7d51d836cae2b1f83a3563f216bdc5bc69161d91af42441b22e76e80f91a51e8635296ed4e9ae26b627f298f973066eed623374d062ad3a67be902ac50a96634437b56db882a0f24d058213187c740260eff87ef41b5e987fd18c96acde50908eec87efd3e0ae75da18113f86f04b4dbbc197e6faee36121f2f03ddc995e07eab68a099931aa288ebc1c0889ffcd793a9d780b736b85d036bee14b5874c669e425540c7d2702e0c833a5ba8ff9b17b7f44b0612dcdd1321d8dc674a56c66abed9cf920c2b5c8eed06fadd784ebd0335e89c7916148b0f28e8a54e6881cdfc2a655246179a23ee328e436bc1890c821beefc4af0ca86923404ee9ba01cf42ceda8801e9a6507e4bca2528953dd9ddd6b4564ab7d18b9d40bb2f29e5f9a38bec25aeaf89f432ff295e496e2cf76ade09b5c9f65dc5807e1c1ac104d6f8593e9957504fea1cddd5add90506c277f0a2939955306d9fd9f5b83ca7c0a7e538de323aac21fddc9f181024c60c7ac17e593704345fd1d51bf336e96afc23fb7704f0fd78c17e5ea09633a1a4e81eb09f2c7cd8366181f8437aac37dcdd23136d38e311da09f3132a6caf29b6a2428c103d6a0de54e4f74deeb4da8142aa405653c634bc5fb288a62a310949aa03f8474b79cc4d578b14fa9ef9f970e28e747cd89d0f3dd0850f1a34056e70596e1ba5e5a3525d0fda0c1e1e7a40b0
e65df62532f2c43c067126c050037c192d31808ca7deaa42eaeff832c9301751a949906d7ff030fcccc510fc3790c6984976bb8580169f0de5db4b21ca214916059fba0ce6e255d6624eda87adcd5424d24e5506d0100ee0aa8d0baf89c2535358a7c244496de98f6533f8c6a4c35890217876c4254dd3b98b707694b8b42c6705bedea7b125a5cee9eb280826b6a722eaf18e4180c5f7b30211d1e983a5055714defbbd2f225b1cc2219201dc1783aa8ef36d8bf5ee33ec0326a127d9a7fb7b50fda765e3a11092104d45e64e07ae4ce215bf5cb27ea333d1a37db8a105041805e1e2eba47135aff7abc33175aa373a1d3621756c82c186d62df1586d62281b93795adf3c3ae233ee701bae359589502b6b4b2fa6672cd2b4f3d064118a53e2beac5379d05a623cc95537fb14fa583bf0f03366a9651cd91e81f20aaf36098cbc0bc8c52c5073196ae40828e01a266e106c3da91ae60bb9571a610622715b1e5be5e0954bd948e441b1bca32823900324c4b8ad21ee701c4c0c6e4b3e9fb71683b9bf1a6e4cc2cb3a21bcfc5153f5835a309722e4567d7e66eebad6abd0d2e721e8513fc7f266c9d8230cced279e5f6615b25648e6ee5d71b9836fdb0cd27e99d9884f288cb3d64d7f6aba59ed5ca44b645c489a497363fe48e965a03c738771e2f3f180278ceeecb60d69c3391d445c0c2c2af8d9e72fe2973961a5c75cbcd1dd75ee3417900e70ebb4c6f625e5cce11e4a1e64644aa524156f5cc0966a1a07e0c82cca92a54f7549136f2858c66f5add0c90735ec9e8e8b89d99730242437cb565a79d3c850a43d50f4af8791bb68284e395e4d97ed183ee555fb81f871f6735dc5da96f8e9954335695f2f637fcb939250bd869374d5929ba298d8873c8f9687f22d4bd45184d96a0e26d178a1e413364d62e49f8381db668959f3b8676b8e5e08cb363f3a51b91d27738efd10fb897b1918895b75585174a272934ce2a8eca9d13019ea4b9abd86573fbebb45443ec9302a65d3594f6da39ded12ba4aad681cdc4e504ed7dc69d37a7f27836ee24bd1471884acc6e8194cb7d66e099e455ea72c211620f723ba7089be12fbcb93bdb9751d7718d2c63972afb3be135d83f413c1e1697fa3ee4a20177933973870b40e5bdb73ea8401e4846238306b904fd028dd7706c26909f96f1087fcbbe87aa053befab4b6ec8e07486dea54a4bc8c3c139b3d010fb6aed3563f2174f6eb46f8eea68109738ad14f381f519b39b45a50cfbc24fe84eef9ef99241ef53474561831faa89499dca951f662dadef973837ff6f72cd442563b801df3a5114aac4dd85403b0600698b09096d5c90daef2b1b84d7a1e9ccba1265d8893de06b1e8c642f95eb8dd26877fc21fb1c828aa8eff954553addbaf88f2
66b25b5ce2472c46a38eee97a52296bb6b66dde1823960da350e69f7672f8689b281266413f0932042b4fcf0b8a546292e492e45e2d94f8e4f0115130a9a464e2236b954875aecf6fe0925ecc1a010f490292bacb1f2bcb1790845c7dad561bed04a403340d1efe1cdb4d83e00bc688d5877b7b2ab52bb16d11d03bb1e1190e9531f8a7380aaf2447f1f9f6bf1d389b05c897705f52bf215d1e421983d2bcaadd2064908f84d344d66ea470580248c704417aa2488736cbd72a7c0a436fb7b9e6f8aa76f8c9dd780d222a4652674f624fdf9a4b375c9568db0369f37d0a636568980ef3a411b49720096ba31b3b5604fb8437e007b092faae0eeaea1549745c629a92fda7195c5a28ed014d3452b472851638a0d042ad5eeb447ceff57204ba469dc870c2c31f7e2f35319666ca6c296383360fd56df57bab17ea26c438ed0eac012a4bbbb4f7fc0b7a0126243b676f89e4108c88e0dde8799006a68540c294d244b7de1eae3217e0cc6b5769800db8aea252114ecb17ca31ee3520281a5aa6885fcacb5c33ac32a68dd2555d527e611cab46e54cb514ef43a6c0b7b087246efb89fc7afb1336970479a64cfafca89d09565b03b1b260ae09a950d5828e14fc6439576a60c3f348ade4857a151b024f32a1f67318c67dbe9532a4767882331ceb6ad429bcceae5eaa93867244b2ee38515099b674a90643625b6ef54f3ad687476ef973d1fb7f92e00fcc3f572b6b913de830dcf0af8b1567758e1fc5fe7b529783d72b656fe29bf7704976ec4aaddb3968f6b8410cb204b600ae552206c929a1d589f76de8f00fdee424a39de97a162c84c17890327a9b0ccd96cf5f2784a9053512294a4b2a88dea2dcdcd98eebb1f33a26465f531986d1515f5704eb782723a3c757ecd5c0d6cafb75d8444cbb825275754fe373c4575668cf8249c028347d4372e4cfb57a408b2e4ab268fd1d14c2d34afd8a8dfed7428bf9d5678038fa1dfc9f47ed9c974a959fcb3965c20bd8f288c5b29e9322977792d6079487dbcafef61deff4001f82c8e1e31e030ff920bd58d69d9d3d0e5b0f3c9b8477d15354329bc485dbf280f51379cf8d604736d4777e720ce6ce8df339661c3e89b10341f40935c82ea1f5e0c336d859aed5e9de49f916c409a31434fd18a17b06b157b2cf8fd4b1bd715498dd8f5c57eb7328814b58686ce09d77da69566a6562a6bbf319c2ababc45c9f8a01a4834daaaedc77698a1758638cf4083dcbf0ef3e60544824bedd389ffa4c5d75a97ee56eb1a6a88155228800295e079e4717014431d2820867e46b25287842a7083e2a6a8561c26d206612babfdc34b1df9ca4a32a4f19b0f9d98b07b9d7a0dcd82edde6b0454e063345b637750da75a239a9341558e1e3540f961f46d8efeb
e216293e24401801ac65f82bc1b702b38995a3c69399645ff242bca25efd4a2e85330ef9023c8d1b44d9264532e647f1190730cf24abb1dd3b0dd567d4d3c2ab707ee74dcb1aa7d0eb7488ac711d325a1eb3475526c41f74ff8f91bd0ee5507d50349496792a2461fab14cdedfc222165d0438deea93bacf5818a0af53cc2de188f28794a591b3a11289e2e0a201f54e30994aeee5b46ab5abda521777e8fb15a53472bf63dc526c8bd75d8514fba0c29da4a79e957a66ea77c37aeb7025f24b2eac7782356521827e60293d713cf6cad48cccc83bb9454dd22ba43fd23c87812e239701dff8c949d22d07209e94264b427688c9970d9bc9510a415ab58f08c86ac38d80ad0271635528757b07babb8c31cc0e1e9c99bf7a07d99d920835ce72e85ce5ed1602aacb778962e6aa93af14057efd051561f77996243b06c8da26fb46e696216620dcc9bae5abf202192d86a1b6d6c002225fceed3be9a4e2a9c611392245a6b15da0444a24f6f2a72210fa863ada30f7f27e98eedff1ac101c4f3b2d98bd09dc5947f51cc24039050ca52efcb2b5633a84bf1873733547f5b96106b74de88b88917be5faace63e906e3a6800bd41666be2c3ac79204286435fe7bf763de9ba28c966ca555e77e41ec831b1caa9cfae788a3cd7c05b7244349b22bd9424627352f8db369e2acf511907c88838a27b7fccd545623f5673f50dc794a6b7bdd4582198682a4f8bac58f5d3333d9cc205e447efd4630280c4083758e44a4a41bbf4c09deb7f48d3865b9138e51fa19a4423d1e40dbf7999e0883427b75e5fdc72e137d6d1a64938dcf1453559ce972c22a6e2f8fc4c8dca1c71cf25b6f442a0525613f6e3cc88068ca52b567dfc1e1ab19de44ec5562bbb47b4face30b46b1ec34c0370e0687a6aea111a887549c49227e9468309cf0330d68a72f7f2620eb0eac1380aaa277fbe86c516ff0c3731419cc103c535975b64114099769bef5e9fee5396a2063b11657a8968431b7831fe5161f8e006144d0a3a015367d0a9ae601cd52814efe1d8e3638a8a7c326017cef14c5bcbfddc1c96e7fa91d21a626ed1295ca149100219a6d5f7c3d1afa77fcb499bcc3223a5b32d2aafb367f467a0015380f00b1db790d0bc18beaa64cd409a0f94d7fc2fb02726ab95d4d61a4e0c940a835cc8ba7d25d466e399077c843fa033e1d600276034f0b61c31eb3a183784e112c89f2e8a64129ff5b4e09fc61d8d463d01b34557d45717db21e1d0ef85750abd487cc276c45a1f81ce06912896bf45fcc80345c27faaf0c63e303be7b44f001990ec3ab919cd8e7f78cc1cde691f4ce27a7632a983da200bc9d24e5870cb4712571e71f418bbafc40ea6555ce4da37eadf5efbeab4f6a280530b674346a5f0a0a90133e3
7072e5d94dc7d26642b152978b47685c7c973257045e3d0b6dd96cb5845d5be690794fc3b6f3f1867d1e646ce422e16c2da8258391314f51a91770600dd4faca5cf48ba1284c96ef164cde29e48ca17ede11561aceaf851ea2011586d27f93a00b020c9f91598cb6505e7697f2d114da19acd0d4078c2638cfc8a871fb6e5ea61eafe9b2b83f5b32c7bfbeb99e5bd598464507678089ba1965e9920e2594aead6366309ee4b2e1d597362017d1868463ef38281aff30835c255b50d34ccf87afb4bce0045b81a39ededcd77789e083052c8e03a68a507053eced7bde2b0d5fd35f3081ebbdd3df2120d16c1dc892c06b6f19cfd32e4bbfebb0a985329ee1013ee96c3817ea8514311041847fa45af7d4750e3e17b666b343b92aa5078f9f544d45ed312b6500d124d86388bb7f4c36de5439d9983cb216945e0fd389a76d7b467d4a14414ddca775f694374784ab251a35c26646c14db98d85c7d8e2b318f60d643d879d5830993636a579a333742c20ecbc6049e1c7c8bd58f5009200c26707eb7acf5a4ebadf88946d3260ed5011e4dae9b7efdebbc2251a69462805ec71bd3667e094f618124fdc68dd7e38863e3077b9e07428067d7dba602ab4fab22c691359164ba1bc9b9b0ffd1fa946cd0d28aab6bb46e424b476b836e5f8f01346a7aeb2ed7f999d4d75d206412e318423b541968c2ef2af0a275b97dac972d6c367b93deaa204339d28cef59752850b616c26be87d3d4592154164b8c0a950b9dcea06069dd2d480433b1783b2e048eef04137443c1b77d980cbc81818d97ef6f06de604691b29ae55f0d749ac65668a024423803eccd533e015537ead53d2c8fbbd11376fe3509fd3fd52aef61c45dfda36983de690278b65ff7af994465b7fc815a597f660ba443b8a5daae31b5ae35ea808fa77e3c115a7abe067e9745be0a21d23aa2e22c54459b477f1ca016fef63fb2f22e7a7a7deee5879417eb7dd0ad763246e64c8de5ebe501264d7b46f200a0734e8ba38cae4f7870777ad15e278cd0a031df412d220458aa2e4bab93424f447c6a5987d96553e1d357c9ae57461d6f7f4a8e694371673a04e1463b523998d435cdd863b7cc8971b00b4e08e7bc6bf4926c3cf4670cd613794287316536e33ec5eafb793cd05be39f5ef9a9dbdcfa26d0efb24eeba5d4befccc6c12e5a97012453c7bb42991f705cb773a6c8ae8b440b29eebd8fa770d57ed28f05e452cebd5785ac71594f5a5b8e5338d0ccfce3092dadf7b093615c7276b408305fca2edf355a08015e5db91a5a0182788b0fe007a99bd808c36d501e33ede3f6c06b82db8a52e20a66ade5f7ea162ceeedd5748266661b9afae3c115495039bb7a903cf9649a5a13d299631c8957d966c18fecef06d7e8f73ab61412f
e9dcf7ec29468ecf84256e1940d0e9df74d38f75fe248692d8f012793f0989c447e0b5d9653a5f03952389393556bcd55d93e24870cfa874bc530a04678f64f674ad029d8d0a50f7467a193b0499e6b7a823199915216e428e33b9fc6f3d7db6a0e0ba5d7e2690bbf489abe48c0fb85cae65f2de835f71ff181f912bfe345518ec8f8251c47febac277bf8337d185df491dad9885076ccc9e5bdb1a69bf2c3163571bbbbacd994c051c551b4ae6074d911fa6a15dfa4429b4f23dd79ccf5ce290f5a1351b4dfaf7f12ae15b566989c00d34e255c9b05228f9ee6031fca417d71ac75996bba84438da99a9d1bbdabcb7cc0ee0c595dcb045c1b45115a8516c7eea6f7707d6bcadd18d0909f8d3956ce64a9028ff00458d2d415eb54276f63191b72ed2c023598255f962e3ab29bb5bca6636ac72502cbc1bce19237f0967395db40fc6e018301bbf3a5b9c3cc4d03c7d98aacf37d4e2ef6d8e5200598ce5a5b4725b169337318f2d036ecf4918a656d204efc2bae2db3073b4098bf7b2ddaafd87e2742063e63fe68da5b1a68655effa3e389e0387a3b658cef44e4c025d454d056d44bcfafffca0c5101213639324694db4ba7cd1fbb6adb983c714cec5b9a8c075dadea62def609f4eb500e676c2603ce356ffe2c816dd3a839c6e877f177d88559d077d3452591d25a5fa3946f2b0844a22d58027a3b0e7d0c275cab0638075ec17e8ce9e76e3789dba14c9905e21e3c0cf500bf913741785d3437167150e271a7d1b192bdafdb17d5324ace1fdd4a243614293172594ebc5765aa4efb42414adefbab07ea17b5decbc9cd8a35223ddcdf1254af01ec491f107e6302c17da3a570010b9ab49d7d803b8f533a17e32dfde8cbed1ecb2494e3ace8f19ab9eedf8e9fc2d9e600c8160024d9a53f4b2276cff3a43e6d9f53fc3212d95e90f2eb5e17a9b40be5bbb71b927577ae64ea796b5b4c7a52629065133bc179f100f9bcd61f74dcc3bc4673fa97b263af4a7ae0500856e832212a81a371055c27849cf658f765bbab2c7fdaa40cf1ff663318167b339e303832c48f7fe7b4833e8cdf75529a7815986cbd43c17f9214c89ff215564f69596061082838027157de5a4111ca21b92efa9bab7a692c4c0feac3bb7ff36d2603f4d501d09a3b1134b51b6257f164643490e4fbda1dff16bc9d20359dfd8ad59ae8a8d2d74495f0263092edbd8b9be2e5bc6ed913f2776a294025f0b1decf479376cc9662c5912102fd10705ffb8e40c92c9defc85d27e363b80f4eb90cc1fc65791e673f8e3230526e433cbaac95fc9a5a13e99180ee5dd3707f6970932bc6e0e224d94fe6b869ad86e495e235f2cbc860900b1dd031b9286208617756d0d1813e1daa66c2f3e733541820ac880058a99fa58f677e
b626dfd3a8d4a504049a47c35e8ec14f1a737339cb710ce3a83cc001b84511baa9e1aae1b1df909839344dac908f4070e5f13b48e1fc1a5c045922a8f03b721d2368634890c137c1c16b837ba5c89ed8628dac3d6bb4d717eb26646a1c5f3d0d438add2b4a42a8d88ded1ddbfa6b5deed6e1a37e3b9209542de006e261a13f734c8e4aa2fa31ed167b8ddcd91227737c406b344dd44ab844f9823bba1eeaf58145c7d8a6392bb02480109f4c3dc9e86a84675dc2de6104c1bf9424a4ee13b5fef7e81062d56415a29d999c80802859d307e213c3bfc904502dbe89796f67d1e68c090bac68f2655c8c49788820ec1a9ee5299e49cd1fce1a8751fa11226bc3898417fea48d969f9b1e6b33a079be36c0f6ef08682e2901edaf9c9216d1c5778e17b70a7307c93509ba2efb6b1def5a84cad094d865d5757d55234cbdd87d45f9efe9548d54b641dc6eaae8671498452e6f5954eb1b0021d349eaf7dfc2652153a6a7dcf603a392dbe359d119342043172cff2fcaa22f41e5e4b26cc034398b1fc9e60c57b8659339b4718aac5b1d9f9d47103b4fb0fc3660e0e2044943dec1ac4f6b504127e5ae77275cf287367c324081ab7e3a4405966a372ae32a09bf978322a79d7d1ce45aa22b14eb6e66d47b8c702b80d5692e6a7d380866f61e6589a080fbe720e20d10a636feb1ebc0996fc1c0fb6e2f83ea9bab7a6ab3a19880a05061e11d3ac66bc0e10cd43bd304e956acc8bdeba60bdab0572359e26ec9dcfd77f3e5f4287f88fa77479a2053e15fbd2bc0cd8f9a8294cafc55ed4820661b3046cc2e71fc081945585b60958ea67a69d8b55cc18b39ced4e96453f00aceff758a4403d762d78c634777ae8780d11311885e41a5837511e53e2321ee46f33ea553353bd825c4c66776e1b4a99b2e649445327911e7adb6a5f3a8a402be0c6c42a8019ae0c75d6b1aceeaf35227e79685bf799f5eda354d68768c8c1b7403adbe65577be3b233afa8f8ae0740c41483698388995b84a04338e702ade301f70e0e137ff503779f85f30d2d83410f041bbb608f429de659640b9bf03b86bc4ca6bfe74d0b24aecd0021c1bc3b048d7a6aa72fa4a3116578cf910c96828e647dfab2e7b1a88ffb9fcebfe6e3f2ecb499e61feaa0a87cc8271d9ca0857c2803b5ada76dbe1c163fa9454331e3292d6582c5c399f9cb5794b43ded257ce812b96fdc4c01362d45a2c26a4a4cbc230355fcdd70ba502894c7ed7c93b07e227988deaabe0f594e581ec63f60a7102c81d96723130ab4960e641b6281b04c538f7d37bddb39949aac4646c17a1455a592bd564f3608303162c395e961e0249f6badbc74b19b883e77480361c7b26d51deb88698f91266f0fee2eb30f38b9dbdf9b34c2de5fefc0d441a7510ae44
88a70faa929fc938ad1d3116ae9847a94727dd607340fb2786b12898c980f3c5a1a1ec3a140598cc5df65e8f2cee0e9847440898c28507e215e29d4032e425e3c9140bb67e9a017c089cfcdf2cfc1cf74d000e30a40acca9dcb0beccb2ac6935c152de31a2d27b63b7fa4744e19a6865ef72594e98930650d3174e58e3dca0c94dcf1ce33558f2d20eb6dc519431d823b4acbd22db19ef0ee8c99a9d3aa1799f25defe64a13661daa5b5a1d4be59a970ec87ac6533561be444d1492480067ef4b147bd535611b171a19cd6090ce73e248a9a1e34716466453278333e0493a840f89f81c27f0216ffab3cd5516a2da6dd97b7c2ba1ce83db582731fa703bf630116612562960a7c94322532abe9867ba4990138d207ad8b78f03ae49be80081b9e4489800817a4923d379f7ca03404d2ec75754ccb17e71fb6b9f507de00576f89274cc3ef0409ae012bf1f5018e17590db2542480a29f4e593913e1cb63b32787789751d1802b0cf6b2c5e9fa44223601e2b3fcb491ccea517bacf5236aa9ba871aef52af37c6963c39fd622dfa386e6ae0c36548395104a53b70\""; /// Hexadecimal string 65536 chars (32768 bytes) -pub const HEX_65536_CHARS: &str = "\"0x65536b37b273ac2f126b11185ef91c1cff07c9b5081fae332ef45c4e90dc0bad4868ee6713acc2eac2d7071fa5885f8b2a9988a07e01bc7de1ad0cb0f06a3467905a2e8723987bbc96c86ac4f1503479e1089d8d90cf4d836c4d12816a1a39dd8c379add349bc3f2f543a81051140483f61e81c3f18ff1c4049aee78e1c245846ec12f1b8392028c6a2e9e3fb110d20f0877110b2f266609a3d92f0cd8b59b7385cbd2e70417ee062ee356ca191f8c68aa68243f5ab62c5f4b237033c32ff7813405c8a4bea82f73380818dbd169cd1060f4f6791b96a402bc4dc83f6a6fde353e3a5de2e626706ae897b7da6b1e3522ed2f4b0f340378c70bfcaf10a7fc805d696822a76f4d8d2206a9a8aacc132dbe770ac870b9a406bde566ac665c942978e5edc1efada06bc13781315eea26f9977f58327a188a9051c6eaa00ca4e2e8e04f26f8dc2b589c399a6160c1345816f35e33b4a0db6d33322c3ccbf82cfc5c1b28be7da5d834d1720343c1bfd8b8964552bae373ed2a6b1b6f02766f679a2c9f1007610014cd71810e8117255b0a07e5e5e87711ac4713e13c6adc899350c4b35d9a22925bd46cf2cfe04fab2fee013e3560380de3b879b01a2e03347c784eba5b1f0367aa5a51cce5dd0cce8f983a1c0876887d679ca207faac11fff8e8a64c81ea02f0aa7ef2cbba80f75eb37a3975963b757f7fcc805adaf7611d729f1a47df9e85df5b2a2a8eed2b69d657a4513472ca6b6a44a1b695c8183dd178dcd
621b2066ecccdc1fb88fea080735bb2edb104b88547ad6de9cea81a2afd04f245d1d15c92955648c248501a9b837ab549030860417f7ddff056eb73e90b1263923fd6e5ebba43ccd9f839817967df5c8a6b2b7fcdc62e4a55978c0baedb046059e4754aa043fc975d57c09ce6dcf56b1cdd24a85272faa1d821853f71b0bfbff2967f4b2dae326e4d990c3417f6c990559d25ff5b3d0b2573f49212bcf33d06fabd8a971f90060b396e178c6b52f55667958b1307460827b78884022187a85ea89cd2981f88365b5e2e66e0fb55926d7eed93edb19afdef4217f64a33d1c9318cc05cfe774e0f09428ec4a9dd38ac42a48751cefea3c9a8935ab04fdf7b24b834261def53839cb68a89cc61ebaab7047cca9d5727a86d79d5944737b0642e8770974cf2128c663d0133d9555cb4aa141e39dfc10c70e2fdfae954a7dfb39af8b7419909255d2d81fc069071fc29d1763c7579e729d87fd148a9bf26262a9332b2994a024adbb8f43ca76389fcead15a14ae24f005cfec16d4da950c8c1647b8e2357ab59be554f1faad237e492d6a5de11618817da26a611f122fe6c5f1ef9a826d3905ed4c8841f9f5e3431b994d1f4b62d10877c973aa1daf9614bbd55cdd865e36e25b9241c08e3b6627846113d6fdf4bdef35eacd48ca2d76959baa1484adb988262b38013926c388102bf0551c35616970f9d5fb1fded91a25e12f1f12afcce8295aea0ae7608163798af65025a84ef6e1f3d60a5d354d55e0fe408023894f68c2e523be63725b5cf33ace0828cf89a69ee428d073bca15014e7faa83014f1bbf6797ecb80bf75ae012fbd35b48f3d7e213d85c3ef991933021baaa7984e7c92073fcb720911ee736479e79eb55046d3ce4433b669c34e38c14b232402222788c3df8aeb42ee8b487c3cfe6eaece11d9b3aa681b143f9f360c7dc0f157d613c2b85ec407f6d85b1b1876c298c0ee50ecb9e0351738e6eac1e0e5749ab6a5f7aeb7fcb150af97b0c6b4b6df19dd0d06dd44764dfc10361e6217c8808b9b34b62311c5a93b4e6e0f7ef3ae52d4ca2689ffe94a04a4405442059bce9df2d919299f1bf75e74d5d51b963b19a0133e6904cc3c0310fb50a7c0c04c0e9a7d259c12c12cbf84dc72e3e460d74db9a2a3890d551b9451c84ec4a5e565c17692124a0ad25b47bad7cbf8c07ff8df7b40b3226309b783f939baeb02767c829bfe6bbf285b66b235b301372d8896521774d45d5c28ca6593b6f81f9aa9d1c74014fad0877f8134751210ecd6348555bb033d30a2d07a438b111c0ff0ebed28e22b81ddb4b9fc36cc4048afb3c876f62bba6f26b882cb2410d5f8e2895c35b90bd12010f424c64e53d33af47fe5657a2cd25e80b08e8a26274fdec89a6388dfe80bb6c48522115b6f0c782
45e18b42a7f175ebf6816c4c45593a9f40dcbd881a1c9f8a9a010a886a319b6fe97e01347f6ebcb64d41687a08c869b0043c6cf684d11e75161c15f1037351c3b51f135c02935815fe9f5e382f4623a3e34ee42946e774c0717d6299a4c3720f26adcac65105771621ff851b47a1f2b96aa6a0d664021e398de908b83d64719da4dc9c6d20304d00c9fe89433f6a787abd9c282212a1c087781c40f5a615fa8a31affac834cd19e98e7b26c115e6f48f397f74e12d1b214ae427f13fad70b62e051d3fe4f90fafa426bd12321e17fa97bb0fd211401db77ebd2ad2bc5586f9a84ca02af969b1bc4c7229db28e02bd5ca60aa60d3ce159dac61bbfdc6dbe3b58cad672bb64a2cf8e30093d2be04b0f84be835aec191fdfa517d671b2bfadd7a76d45b0dc99ac383a53e324d734bee7738f093a6714437cb70a4d93af9dfe71b930158216a5e19e237276f9ba536b3cf0e063fe7101edffa5827b38b5cb74abbbbecc823ad1df97f8b638eba5a75ae875dbcb1077530c06e56ebd5df1956fb93f3a605b8c1b69fe0cdc42fc6ecdc0794057b1c6e756ad5f6f105727b21074ab582e430ce12fbc1b234b4fe9b5ded642378fea32006d6ca4c539e07163c7fec74848db5d164d4f4f2619aad89f246bba33e7cee1bb434c3c4baa73448226e4711e0a13b433dbea7b474248fea8fb3290c6ae92e42dc4a365e6562381bb1d7e903336c8cfef2863d383fbc0b45f015b0b831636724d6390efe15617333cdce3efb5d0c6c99a9b77ac6f6bce8008e85ad4c5cad08aa9232c312f5df1dd55bea468587a58b0a5d966e37f85acfe260b26016e2c2ff7a5f0fe8c296e3c9f21a69067fbb828dda563ae945ff5bcdcff950b53b1890c37add5ef49c3d77c51f046e7d39f1ce013ff1f95b1ea5c66b37e7a244605ac3e8a5d33c521d7286f6bc22b7fe4076d080d8e66f8921772887233e6c883c5705862d2e4894efe63f475460a10f66a19fe50ff82a2b3c03084515bbcd25aaaf506c9fd90c4eaa150a24fabd3331220d83a48961cf9c4e9da384959935f79d84a2218348d70d394b522ec0787e934aaa472633c75c98909d91576a322198980c06f0ea370573ff2c2e2daa3f71f0f8d92eaf202fdda79d56bcfbd5dcfa435f8300c25a179338bc0d85f9ac297d283aa763892fcfb96167ad6d803eedca540bf05d9980531c135e553bf5c27442e228c761863ff43950e66526a1805e0671afa5ff0eb021fdf175fff6a92841d639b7c88bf7195a8f32b43871a648048ec0fda674cf79239865eda8dfed6d74968400a53a647b66e5b00276d088d2dc91b57f20fafc5103ced1a56ec7f6bc8e56f8bbe42bbd034ed6aeeba802e3f8ae05426758bee5b0e583e34e4df9632b636b79b467c84618ce7aa026dc
d7d1ecfc54a87e6bd6aa5c3d5159693fed90a6b59ed65e0bb6d4c4a2ba045fb539652e7df48386bc2ed72e5c29f8d7e8e45233992fd18c473a39cc04fc8cffe0d70cbc0f75fdc7daa1d9f4ed0434732051968cc6123a5ac63e49a8984aface0787dfd84dd5f16573034c95a6b10b980bca6868c903a8a6d4119acfd7c844c1bc5b6c86977e2de33440e4b31f472d6b7b58bf4394a079a529587d25f2ad0fa1bf22e196d859bb45b81788b5789384dad71b23d2a226d75e0679e1cfe9efb1c4d66463a42796877cfc9e5e2435b072d3f4453e23fdc6555d2cba5ebdcf4ad536b85d30286f3df49cb5972abad4019159adc68abfc34c04e46a5bdc182e622a08694db464760c5a4505021abe6b0dfe15c8eb5fa686fe0cf8a337dd14274884cafaffa3127ca9311de8cc04d19b0ac632ea8ba34c4308436cca4e25efe3796279f3e9da9f697448a9d4a0bcf4673f3e3e6a3d630d9ddae3b6a1ff95a545db4a5fad2f17bcc6c21be224b3a038e346d6e6db12ff12ccd6d573bba31807b31ae12056ad5b77cfc5047df67a48392389665ba2c1e28f9889b8448f0fb14c30ff2050ebca2546af586aeb5d83a785eaa54874521c5ac3daa5e9b365d620801e6384b6d72122dc72ede0ca03001af82d5650011abb62769a1a55ee09aed875fd706f14946248fb4b82d12548107db353b6f39a9da939acce756e7fb9c1866c2c00de1a79a82ee91b46122bb8f30d584619cb21a247cbffc8b7b9b09d76eb13bfa48c8f28ea335c9ba9230a030ae4a69fd7ec47a5eef8c5f9851bb1142092f173239dde5c059a728590f1741dc9f8aad1590501ba5616b7bc188d257009190356f968bbceacf77ff23f014f8310c9f0a89279d02c8d225754ce535000f79ec86455757319f65c4202df8831dad4c8e2b94a67c8949e46b733991f783d9368282bcc00475da4a1be720974d972544c810c5d789cba5e7b0935ed4f849e47207cf9d54342247a7702b8176a164bd7816fdb2bfd52f4c0d6e528b4080150d0eca5707dc5c1c220cb5c0508edc220d14e84b5386ba229b422558d49c9ed3dbe128058ca59aebdee15c9f040f10ef8d77345dcd95e2c500fa995330b6c0b2769c086d03049ef3efb98999265395a2c0ae3cec18c13d270bd91c1feb53588812942c9ad73f6d89eae4e84160abea950ad7da47e2c82dc9fb06d194642c2abf3af0b30e5d04ce6e028d0f522d4639cae0bfbcb2649d2e64a6a55f7437ffbbc65f8fd5992e59a98e98f0a1a83929ef10aceaf6319332d7ab660dd00e2c4b898fd25600161ccc24aee43a10eeaa62ccf5b95790deab87b88b49ec872f48c8713cd4597f6e55a556ab242ed843f866b64ac5b100916f0e4711a02bbfa9c42e47e5b4e72e84c28e27fbc7858d1d8791d600f
29936680f470fc4badefbaf72f2ece3be6b85835b1bfddd8aa5c5b7a87f6e9c64ee9e76e836ee16efeaba2491865c24e9e0c5de706a567a54b77400bfaec3b2c9d6f43fedd6a13f713fea1474a6c3504ed17bd5a5e3ad57d807eec47d6e6ed65d8fcb4b9c0e7588680128b865677c38d7a2b8918cb76462bdace3efa22217c64b05cbcc8d29039f438e2687e521078677e1a44678a441e31752d86f6da8a46f91766a40a03526cdafda3e69e636093d8b27eb3d12f235525e21f98483db3f9735d76f2d5bd5f1b4a945a367ee471def08f2f5a5af864772436a0d2f8da323824489dbdd65da331c9ecf8eededb058c563ba0b1d378109df5186fc7e100c0beb4651e4ef8de0f33cd0e49ddfd7efb84be4ebd71f778f26ad273af8b47aac27f103620eab42694fde562a68312b551afc2c592c7f7efaec178625c51eac8f8028687c953bb26151305c6ba22d44d7c6c9a48bd1542e0211ef0633ffc8bb0ac98b3aebef98fe710b4af01303b1feeaadbbd3d631f3e133c646a0853887ad851130dd2924e84da97191494b93fd711639c4aa54eb60341b67cc5f39fe6b8844dee0c70d38f729d4aa44553a161e9af54c667b9ed083a65451286dff52d9503e3f679b5fa38ce7adf78ef8558176a0fefffd45e8a402105d075b9d50e6aea8fc00dbd5db0d3e0259e0207b07ccddb1cb54c0c7d07699a9bb817246c86bc7e76b4726d2648b266508dfcce13b7390eba8b3b5629ee150b540fc032ac6fcf6a7e20d8a5dea3dbba99b8470ee035a991f2e2a7cabba89431308d6e64a7cf4e931572e937069659f88d36afa4c6e98cbbf12fa624b17cfa15118151bca7912380d4d66220f7b58fb3bb71f3af7861f553066a356bb4333d18e9795fb5a209155f38c7d0141364ec4f0de11a38d5efec4f592f6fca3388f5772527d9e216aa09d8f3be8591d0e4efd9fd19dba829032b524eeb123f27ac8695600658b034b0c5eb4a1166e952667e8441004f4e8de0d0cc9c4cb79fb5579b9d6f2ccb390baea7c9719a55beb0cb87b82314a0c0c8a0e66aae85fb39224ff7cbcb4e9f252de601c7e452713f736039b7d4e90d6495806ea3f2b129ceec6ae7ac401da17a839d8eb394c0c89e7b39b59ed2f5994659ce9baeaf3c1adbfa943afdd952223457ebcef5a3f9f6d19858fca3e61a78a755ebfaf2e57cdb369a8742161ecbc2fc4c5ec29b9537992e4221e14a7b02e9e63b3e5a6da6587a7ad2a4e6963190421a11879350a39c22c9cc12b9ecf7a3f9132b53e07cfdeda0db6e29ce5a82b347db645377183a02cb852feaffb44a57da36c02945bb803d057a094df43dc1b4ec751f4dc3be45bb3b5490822d790d4d67cb82e23e418327be8732cd2db2f2479651a9130620ed92b6166bb729b0e5a9bcee
e3d1c7a98e0dee9ce57911b1d1a1f3da7d1370414c19b1454c8da246835158fae4de3f882240345aec8174fd5268f5b73b60c602442e7fcee71531b0120368abe2ad3bb5ac2da6f7ed1a14c168ccffd580261d367327ad4834c524ec554ebc0c248be39d8c6503e4ed2e8df7b36d59ed1b6b9f5d8ac455696102a9f795ca0aa75a7e9c86aa2f3f5bbe0dea2b09b9f0afc708ca38152decba96419ddc4890f74faa7358824328d036453ac16ae420fd5f86c0ed8e3ca44b095f6d4429b534d7f877ca07657704d0c35a5a1fffaf12e12a51bf210dc557353fe27406c36a185d3da3f8013438345e1c28cfa98038883bcfb957e0dfb57ff33f8caf5d5b9edc596d26629559ab6f035ba941fa665337ea16422549c918221fe0b5602180c3ba3fc7c18c7e0afd471c0dca54bb3732ac40b90f9dcd5aaf665b0ca63292ddd7235b67892df8e965fca21a8e77c1ec7d444b6d2ce05bd2253b9cdaadef964813a03b99695bfb5a8aec508e98adc14d35f7daef7d9f384776565152c354deb30b98d0c0543ef080fe48495899862fc553a2d48e1b39fd2c699c7891100c01a89aef6ce38871e04330c8164d6ffa27efc4a4c7f282b5341a8f6f92c87eefcf0facfc9a6417f93df635b600c8c218e185522009e0cc730d30dfbf3f3f2c6b2b582a6298ee855d0d7df41b0005033788c81830fc09cb97e4115ef9713ad8b6b74787db30427237a48fe83126cc2b8d3d431a3deafd22d532e9bb9ec2e74dc0f4901c060cb215b57d07e0c7cacb6d3baede035322fe9c32f212b28f8318a87f73f603c293d516b475cd96eceb1739bb4081a2d2d82fa6a6d0f33231ef184316041e357efc6bd400b7228943b93d2eef303c5ef7d7295ca959f10ead921d6e257dad7160743bdc3db3c8fd87d26b2aca6ed0bd945f41caa574d9ed61f65a45f8ad7ec50f6d7160d05ebd4516d0c3da1eb4efc88f88c5ea244ed90c07239f3d217983d93295926d6d0c8597c1d6985dd9a0df8cae5b0af9718820bb0ab425d4f8b6b5d1a8831b69c695258d2e268deb2e8743a1cba722078458fb7d5b5f4fc0f6f07207a25ed4608b7f8b8ab7ace1eaee32460e5ae00d299f78389bde8f7a64b58116387e5f0e98522c6743e808d63d7d4d3596a74d2812780ec35a9715c8fc440826d37ab29a909cc0c244099453c4a08fe541099cbffb04ead771a5c7c7004c55c44990856f2e02e790e9e4a3db5cf4db2873ad82a97159c86aa8347f8dd8ae846a4cc42f1448046cfa0e121fb9a7787136502f5cc5a21b4ff3da68a5ccdcc54e49f825dabc00a10d437d39a2a9c2e5deb72ab8b188520671eaf1b0091ab7a271a1b6ad840d089f2f79d1ac7ef5e317775eee5b2153c550ccd0038050eb7b7a6789aa6bfe68eebaac4851d93a22
af4a08a14e37ec02fb0f89edbf0c1d15905a6dbf691f0f0f9e96539aeccad6267f52574aa4df31ec79dc9fd6f27b5a26fbb1945528180cad006e68745288c3868d611257541af28f036cff9b911172e38272cd5fa693ab89a11e02d795a0c7b4bda53203027994d2dda4129b474a2d94f708cf27840443597d9cc6149ff11d6c747bcc070b4adb4ce4bce847ccb7d7f899d1c4cfcb6db49b87c029b468c71e2a32771364c16c2ec2e6d10d1dedfb3379310e32b3e16153609723855caad0cc183c99c9143953e16a22b832acc833a8cd3b64d448b76134e6cf42b4b68e920c8682387c64bbb4700d8b131c45fe76d6217f96dec776fe6b49d4a770513d8228fb9e8f51f3b8d086e1cce2c5a3af3f4acfd76d985147d7067a3637984f15ce82560c5e1304f25bb1720ebce8407cde58560943896b8a3ecc59cebeb77131b1d33ddc1000dec496650992b25e3b7b13b5ada9ffb8ce961cbe53f43496e95f9f49d860cae0eed145b2c5ed2163a6a06acc60affd3536cbc328eef9dd2c3d778ed42deab72462cd808f2f1cb4ee11376ab3e4e2b1d3b9e4cd061aaeec1c616573ca29d6561f8e675af33805c2d57d2e539fd646851f4fdd9cd00a863bebb6ddca04cae87b5078c7349d6467f8c96fd93f881e76b89d506b5be38cf916094dbd6ba046995de1e38e5874e9582a7920afb47a220e7925f83adb6c14b70ecb6c85e0f14ffc1e99b11e8e8d45a0c4cb7459a758f55a5359776277548a3cb8e806bc1df0238664b35fa2173827781cde2fa9ce98853a86e3bd0bc8f38a53eba0fafa306b97e5657eaa4e5d2325ff5f39a5308126e0a4bc23696bf2d6233d3fb7a2c83f3096f82613ea80c5e4c3a5d5983d0b637c62bba0e18002e8bbec09608403107d1dca13fd22cf5f936829de8d614d8cb04bf969d3a3b7d2a7bd9a572b2ce3e05f62220548e0d8e93cba0c187464ce909cb440f4051a3d58f16ab95185579f37d1e2f890cd730fcdb99f7bad003c4c71bcfdd430eb9dfcba3140c30d9663e6b94df246aa73c5acbff26b7b6c373a0cdf64ac1229f17815be0134509bb63e066f9259d3e7bbc524e95a16907c335d78f830c4b63e461efc80a0d1dd31b63ee14124c23373c4f41f61b1923f4526cfabc5f3598db634ebf07cb70061ea71417f8efa32a2ccbd67fe4943633ef76184e38f3ef594f5ee8e1c148b30637358e32b5031b80db19e982291b383ccf5658cf2398594c5b570a2a284f45b44f343b33c76dbfed74c8919bb7fb09b46c854e0acc2fdcfcd6bb2434ae40f91413fca079183e087ec16be83cf854fa55d3b63fff2ac24877bb1b3205b77c407bb70c04d55023e99eb036fa7dfab98735234fd73a2b4b0962f31fb89e207f5fc8b9d4dbcfa00a184331970379dcf55e0b1
84fa79f32a9cd7a3b39b2bb9f3a44d628d85abb7453ce94e22ccb14a379e2f0c5caa0deb484e8f8c455e97a1e16f84685300b88211aee0d6ac18965571fab8166e5bb1548d6b556a4808b86494102a8f547f58cc47140d0b8f39718e1a5f35842b2ef476e50eb08ea1202d6db96fdf41c080e6b4609d9acf3114d4532076db372b4ec2c52f2b96a797896b2c1745b13012c3ce9e3e02eceaeb11a714539419b3ece70ffc777b3f633e406d56fd8f3d1feae707a1c9a64d527ece1d57fe78593df50b33227f35a3e31d8ef6777617569113460d7271aece80c344be8f5c139f567568a657d8b5bd5910c3e2f34e4fe5930d5dbf5eabe23ccd4c841a3cddfd39a287796844c95bbf6675567b1cc84feb5d3d4e514206dd4d4c2f90e296a04a8a30744618228efc696c40f7857d97e2ca25b868f236d04bcd20020b654a69180f67d43b5c34b626653abf25cbd364dd465dcb7d9dc1bb61bc3bff4afb9ff31f12ec68fa3e08a3fc91741edf8ec23f0a425482034b47022b74cd8ee32bdc1a4fe0a7e9e626315e1e6adc2686c6394a5520d7e918f5fff74e77a69295aaa00020b8337983e8af0cb58aebe4b486642cd5021c5998b5019cc64d93467c0daad184a7eaad2419755ef37c39de6c0d5905b8629ce6f31a1a447578248787af03d74350f25fa8f859750a98f94317d5915e4ad1b92ff45c440e809a1a612d962e2da2778677040884342e1a655559b1726be4ad5a6afe98ffedec7e4ffe0415b6578e3d4c206f9f9da50fad9d78eb94af03ecda8cc521db0cb49f9e334001e8d6dc7434d5cf0866a27e3c169004c2c722d0dc8843e29091d4c529c59c8d434eef4e967a52d38e86de94f27e61f9162ebee11e95f1cff57f31f943ad04c8e14870bee05454650be86aa4284abfc5fae87fface3d519be8a28571282ffe9869e16bf7eaa3fb491ce1f8e9a1a861a76cebe54283ae43f59d654319cf563ffb04663131c39adf1ca127c89d175880423f686823a9dc8f853912ae6acbf1d7ea0bd0af675760c7435eb4c99961cfe4d27b1d61752fe2d33ef19800c9966752b255693e692315f140a80f830be5d9f824d9136115284775106ed6a0c62bc59ddea4c1662fded15c5eda869f65f0c35f6d139ba8a8048379ea6a3c083e78f68f616d23b913100d71d5348d0b80d516288c9471100af56ce2b5eb5b310d7d8814fb0b7518377b8144ff5a84b5432dcbf931563aaca31d0239c321823fb3c28dd41a89354ce47d4ba95cd30d63dc673ebd03973eb03a7d85ea88d61a6f983e7fb4869591d26e2e818a1f77eef3bf277fa0cf386978ca5195001441a1c0579d78a49b1d5b007b7e2a4c80cc4a9721d6084dc91f6d21f6a10e775c07ca4ecf9a911015cbb63335484f7ef654818804d7474a
4c8be35a899bc8a59847590606a07c7eb743244fe10948ff842c8cafa524aac50466b9a5f40461e7fd71950eb673306e477ec34aa9e0259885b128bb7d9c702b410d7d524dad6da5744054c8ca10fe8575375ebe645689dae51cb5142d3511e21a7b43840d0b42333bf558fd51a2870c4860982be9f53355d8cf99ee8bc6836d4692195b0e21fab6280a87e6243097ecee16e9d578012f67f7a377464f4a5f2f222163388a817d0f2460000734e882ce7ebd4276b7e1a6a22fbcef87d65a9ace315ddfbd8e3a02984cc509312fbaf7f4e47a074569ace19f14010aea632670957b2e9d5ec789bef90e32914d3d43c67a4571da14108c2ea8b80af4d332c831ff000d02a98e4ace3dd037ae843a8647163a4e80cdef62e2536c822bc144e643e0f70dc0cbf0a0b54ccf5d188733eb05e87f92add63cf7ea85f91c4fde4e2dac2cbdf2f656cd01db7777ca4190e32ed439d66c18502decd0c8cc1533b263d0d747986c4e6a4aff70e2a84badf0295e5df47c1e35ca13d6bfbcfe7c794cee421e96b219af6b694ad0f24d6f76afc88c387903aca32ab3573eb56dc593f33ee9a910dcc3ed709f9dd830a2029376172e63c7138b757ab345f1de0accb21c2fb6a06b656ac2399c7f1f0d5fb3447c89128448c35e61c2032b4ea4a840119826f69ea73409ababcfdb04356f66429b29499ca72a1037f7a08fb2e6f265b4e743e68035ae0311b2ccb61aed3fe17dd22c77ab38c49b09184107fe012d43ab385a11171ed09929cf462649449b68a2a10587d2f6948df8dc509e05a2bbb1f16563a0b6ca9c8b40528f55d1ba56d68b3d39456c2ea71acbd06db6cfea521f51314aeb39444080a9b05aa99bff9fd5f2feeed3d3b9ed33936c2b1bb0c80cfe0a2bf6fecebfd4f2ba88d6bec458dd065b852d56bf61cdf98651790cf3063d310453ef7efd57c63ae348ae3686433387dff0595622d44ea5917e645d312a1175571338f4ef78c7292ca625d81de1166cddd0005f0332b4c8741c281a8bf6bfa08520622f74e19a7a43a26a8c3cb806009d362d5119c0182b427f52652ecf4c346195aceab37c5aed0e9fcbda7d4d3a7dd59dc3b10c7c5fcfc98ffda7b357327812d2e08492efb7c8dc6fd6f7b3c22193dde3ca229c1a902e9fac48c06bd4ae72560b3420dc772b0ad7fdae4d48f2b286eb5f85a1ec12c77a21eeade57dfff2326e46fe7de05f5b0004fdddba3adf099a3bf1a6ff5cf1fd56cacb8bf08c531237d5d769a1d9cf676dce2ca1309745aece9b341dbdb960b84c40f3f45d72408f2bdf5fb32403c5826da8af71d6319a0e6e9f282b578536cba20c2ed267df4d4ca2f49628df45b836b993232981e0bc4f121b80295847ce60cb0db41f67851d6ac64f683f7cf6f986c5fb2209518364
5fa87717325496930017920dbd74b0be6899036293809aa863466c271158211f3e26dd86a62bf4c159cb17bb5e303f50650b8776c5644988a896fd7fa1c43fb6f142f1451c6b20250612c0af43ed0c2570fea925070ba9699a6b7ed67e7b83bc1db2e213a5a63c96a2ab691cb38e90a435a8c6f638baa9af40c2913fe0c1d516569acc2c27a57e13116a7ab0cd86152c9d972d332eac62f8c4ef9cff1de4c3e13cf03c3b9dc38c0e71069450443b5e07b51e7cce9b96dfe22b652f29541f0ebde8911fb56c2e6baf361ede39994d3679afb266734128094734dd29a0c2f90df70f30bcf45d0fcf461d1d22548d7fe807a571e690cf3bb71cceccdcd358bb66e42da73b8213770cbda83a50d1e4869545254cb4819f63da570b4b13c8545a095ee5d57bd74a84f70ab847d8d9478f232928a7335d8c36f5985514b81099ab32dbd5536f16c6074e2dc92b44e35aff65c9321e89f0f8b04864d595467c73c8dc618d747d01b3a62ebca9e7a4dd86ffd218e1cdf85ecc2ce300fd5787ee569367c97565a375c414ef65431ccff63866be5e47db305810c7df61727bd1c6b5dc37048656320eb71e3ba587d82b78a34ae20063e4f6598bf60b1d85c27d0fabcfaf9cc414013891e6f02d5953c42ca1c8b66dee0be7ea6a5e882a1d6d35ec67b6e6c0fdecdd7d5a418212ed30ee1cc9116fad5297cf60b1405b8943872197347a890766bd75b258c0cdb6c836dc62ed8581077f389e4f236093a2e530db8baac66a22ce78c5bc67ad6a928a5e666f59135ba35fdd6d8f96176eb7ea8d5d7e73cee10471ad348f89f8e17d1f7770b166253b986d9238e4014d6a1b2d7edb9c13c0779d4e0a11d09df3d2fc3facf27cf867189dc9c261bcecc301a51ac355dd08815807a171fa5d8a045b2fd37137b2f43dc630a4f426b53e01a59b88e3020160ca2c27c799d63b4591a8607cffd25ded698064761babb99103f2d8143192c5d9630cc62beb875e793778092f3046cd9f1b4bc83307a53cbbc36f9f1118053cbd5c06dca39edfd2512e7bfa16397cccd20fe4fb38c6f6ad66513d42be20a40e54cc0deb93feab18795e45bb5878663e0750ed3a4f2d6da17cfd85dda8943464efda1fff03205c84e9cb2a74bb26c938e6bbeeeb20ed2609cf795d6eab980a28c723a14eb809c553d9e5c5594aff1e96074649c1b8f4ea42f0d2dba40d0f07d251621297038119ae5a52c80cf001a332ce5f647d6c2d020ff621dbd5effa4a4be4b4cbd540a0313bec9acee45307429b582a5f71f71a6888bc4429f85beab1f6c11f3ddcf1561ad641e2571d17c77ff28ea33772d89aa6e8cd12c530168ac103a98421c299733ff4842ac944574ce3c9df87506daeac089eb8df22f45ceb8c1230be580bfab7d2a746071f28
aba102e93c2e46e30760430ebb3e9883f2d09511f99331cadde5f96d18ec33f3fe2af12d46a3e4819ef95634241dbb23f0ada97759544579e0497f0f4e3077f1dd54a14ebae8b9d825a729e4b853f52b8d58832b6a20e56f27b85fcf38d39563f4afedfa6a5e7c58c47c417438e080463a8ea8db995297613db3be953e31fb570f54f9d90fe16c60fe2d75957701ba1940e98658616d5545d4deb230aab310daba86965131f7a6dc0f7d6c6e6a12cf2481f3095b822e52b54e2cde5e017081358ad1085acd94e434025b710ccf25f9f4423e6e4fc682ffe1479840d37466ff4771ce035855cda8a3e2907323fe7760dc4c39e317cea433c7a32fb587dd860683b08a9b8a568e95498b2107699f5e2192144fe52ba305240a70b8e9d60b56421d7e7edd141734706ff670abb57e1683b1c12b387df907f1982db60a0f206c5892b3b50d1daaf77194caecda12ec5fb1f89fafa5f749bbdacca43de4f7b6d418a9af0ad856e662a2ebb3fa6dc83a41609b1f7d5e90b53d9e8a77a73cf80f2984e3af010aa2bee35794712fd3502c9c58a81f7aaafab04f3265776d6bc82e79d0ec7e7da13fc57c79dc9e4f6ea29c2149fd57c6b878bbc3adb2aad4526dea40e8e30c76fd836d2bb825dbae2fbbe6ce6fb25c71a5cdadc37c0e1852a1b66d7451f90ea84b352c40a78cdddf18b637bfccfce077209c739ce802f85392568f0066f632e2cefc0f468aa965bce09c33713c25c4e0d5381157e7cd13f764cfaafffe0b47ec94b87b9146290719bb5156c44b97623754f59583d8b7f7450ec1b1d7efe8b8f5a09decbdaf790922805d7b06a29aab14cfbde149a79ab9890cb337087f0ab3f40b8abc1f2a5d3bf1aec8a7009a91c7b21548000d50ae6d2955bb9262787eda1fe5e0ec769a0c9a3bfec552fa05d6af6a10223fe4bcee0832475e5a3a38c823cbea745b87cc39d7d49defd664749e920843b801cfc14f558baad762f8796da716674e7ba1a4662b3392afeb24ec6e3a7e85f829d64bbeb812b021df0f8bc00b41660d106920b5ed28f3fffd8b59aff15f6cf514394d457d7ce7fb965b515004c8dcab97e3138b9daea3919f09af0ba2eb15fd37a75b29589f9e7456cab542d91268b8395fa90b8a2f5832f1f55cc4e0fe7c0051376cc1cd09b18b6a4674dff495bbef5869d148a967835c4adc996924e744e19a6e80456f06e30d0fe86a081f481e2aee593cbb63082d1110eae5267a5c6e8f38c766591f5de6ac8f58cba9aeec0be824b61a025bbb8efd53d1e59f6ea24a9408bebdef3b3cd5ebade03afc2dab103172f92ca4c9dae8759c44d67e0b442c00e4b81a50abba7d808c0b2f2bb9465318da6cd9bd89e3948d785b718d1d225c8ccac620bc9b31aa838e9272b4427ee3753dbacb7a
f3cc175063bb9e063a0570ece3c50fcb9468b7e9080feb5c64507ef91a79eaf0d70a0298f9e51d3a92b2591e3e6bf81b4c46aa5a64d620f1a8817d29d8d0deb2b373d094438a829d124aa966d8ce495dd5eb530989bbf7f4a2410928fd2687a5975bf9384c646d2480c92973f0e76cf8c126265371ea0612548cbb21a2772366d43aff01a3a0fafbc609889431eea924b2fbd0b03c1990f7480bf48e55bc897cff83e2748c8628fde910b643b211c951a01b4e1953853a7602f01ffc05b58f8a8a63f9b1a1573a23342154807940ad3080c77f95cc3fc714cf427845cb2a02a801be971f980f9e20ff0333c3b326c510615c2a266e3e4479f6bb9476b29e9aa9ba2d18e5a47f2c1dc8ca2a2f68d57410f5514c37499f7e8aa634567f1f5f43a1c858ad964d4ab4b3fda048e98c2bb369a3d7c9bbe6d202450bbe4e6874d01ad3ade3070cc6435223911150230529e67998d86740ce1cb35530e4510f08b968b78e319c904e2f92187af2ae70900ea7faff3830180a72ff9a55636e6ea0d6be5ea7c8a9dd2cb4e67afd3c4baf5a9506f75173e5c8c556b77db2eba2b25f98db84d64302ad1952936eeaf8296379e14cbcfe0546ba40750e3d14d29d884d1fcb0538d90fad43f53c20694e19239e24f5adcaa000718d0819be166a2827a04eafcf04fe4a2f8eea90335fce30090601f5dac609543ce74671cc0b735cfb447002a2edb9b182cce199a57a186f6bd2e5ea5216b93e13e88543de802becd8c0e7b0de5d08dff22903024da169dc45c8c44d4cafbdf8ac7e3d77010dcd94d4a95f1be9322c8b99449a9c737d2812f8a6fde48043d51058633a0db37a64ec4936ab4e80fbe7168b3d44bb92e50d964a9d98d254a72657755790ac9ea9fadf5a221d5c1bb6241bbe6e9682d9f74910f8f3a37d5f597ce1c8ffe000ee97434a452be176171dc2017530ba5314a9cfc90f3fdb1dfe8c9df810b3ea1303892a9df04337d1728d0257f85b921c9e1b2d954a090e0e74cd016114f9e7b829a6d311cfac64aeb2a3cdd10a96bd99ac77faf29ed4fbcf93e81fad1a37bc72f474c650eda34c4150dd67bf9d9c028b65748f99dff24595483a5a5aa8f09886b51cc538aa8666669923b1b39ddacbd201c11b6368b1e15f60bd0620efa37608a8b797f7b275cbc8ae396128243944fbf9dbe14e00817ce4e374ae3c04ce65b31636d53531326108d77012400faf0b6fe6f311e923692d9ac42af0792ec8d7b47303d875e5a95fc355a0a68dd690f29ba2aed1193635f6ac0e428f8b6494402ebf4440ff409bf80fdf4049d292178e18473701e97638ad7e6958a951a457a0df68a1f0e9869c77cc535ea6e8b35fb4b94a30dba24bd9d316f5dbfeaf50832a1573f6965959bc18149d6f7971f567f035d4
91c7a51825d5b4b5cb72dba919720e6a46f81aacb8e352e5fc15844a173609e89a21a462676c6501fc66c2f57b041e8a1c83917f4a0e7cb890e64b754c4f6731544349c1217343a4c8a7495082ce57e5137a1c80d3d479eb26af5f4ab7452ea86ae729abd2b6ea4c7f22c1aba4d71d47cfeec2344aaedfee4acabe07011807029a52aeb083436e4f4cb73320ef0d5196f6f1b2edea8fd93cae03e54997934e38e3a9b036b24d491f803799a0fece0cee98abced988ca5b32e179a3c8628269b18dbe3b2914d9710c4666cf28e11cf1bd296428875f25085e98ef1fadd60db3e3dba3cda144824723fbc449103634c5314954b5be07d0f2a3c3c45700c712bdf0142f317c2201e3a3670743f9bfe23f91f5b9e5fbebf6302a8faf6107eaa1820ee82584557da209796ab692d37f2bf7d2cbe192c25b2a2c2ff09648ba21bb3414ccc82a3b49f1fa945f51806b24ee50aa3e562ccdd374778f3ca98beb0459bb56f9359bff810aee72909db26fc5a25715ef07bced17999a010c78226b3f8bbbcade996bfa7f9eb312113b84cb1d65cd0a52746d2c7a89b109783202eb2aa14bc7a1a46fa225b69d1992838338c6f5e04f0858d026bc7968a632d3fb345f39c2dd850931554c7f8950246134b9cd5f4f252a0d22d958a072c61d25d5ec54cb9242259f444c5e7e536f0c32fb716993fa7965fc98e080970a36a08dc4eff9f85b77ebd468cdcd2e72d0a2a021b0a853bcc033ffc48bb68dff288828e46310dec368b860853a6c19069db8634486a6f545744ce83cbb1dd2c5f2d2868aae84f1ad0fda0aafe11f0fc0acddabe4877896d26c90d6542692b80c65395f69132f0d2fbf7cd0ead779a468ae9f617eaad77ba25db0acd78dfa2b2005aa8e8273a364cf2559e955efb36db928aef178a4d4184007d5ca5c7ec611016a355ba79fbefab86a123fb97f53b0eb32cfed5ace50a458f83644cf40b91066118343e605a1f6d3d5ba3efe9e0f2250d5bb5257ea2fe097a7a6a3f80fcc6702956e79474e6bb0ad3564e621fbc320437499ae7c72a2384664a44a01a06106bbf02d9c1953ca2d2d61c95a1d6038b11635bddde4ebbfc5b419e3058ba0b4b6d9b165becb17b64d4cc450b31ab20964d26ee2352085758f5851d41f754d8a8f8a920ea88631b61f69d60146a2167a704129a58fd8fa6165c162820611a8906d1688ace79bf9e66512cfd1484c096ce50388e602ed43cfdf67cc63847f3aa17dc2db6b7002ca597de8170c5c45011b43fcd529de06cc294bcbd8d73220c46770c5a9701628669d049a83e764855c5d32d40e646989d7dd84266a7d280eebaaa8d47e470634732d59e9ab4297eb3f07578b53f3531adc1391cb47dbfa75fdc02cdeffacc061deefbb7f0ec9f0f96e8ce0496e
5abf5d87798a699e416175058bd093e97c1773b5616aaf56ff4ca5c26fb191c5ae41d4c2f361fa60b6d554b3480aec44c9fe477d2f0f7a45d7c6629344e67c93f33afe231625e5aebd5f81541f8218052a577b9ad7f6b72ac5723585e6de4ea90a2872b2b951df89ac3939cd87c91200b254d574c7f7cb55af7c9b278b4d6b201b12f0b94ae06cbb91007bef89368b73b0ae8020f955017c5d46c291f0fe20fbdba1739ea4a331803c6fe7bff876f89949cd5da5e2e7eee764b3c7c45e8ae0292753ca8ab2ae631fd8438d349a55beb9bdbe841e6bbe7882bcdb488470b0e55e6eecb0124ad3fd3db5a1376f11969b8287456d3d44f9a32dd474999da8f7ba73dabaeddbd2c810cc63006d8e5dd154a09b9e814997ea719de328d4e4eb6ce1ddf1bddd6e9d7caaf9b386955a8fe142d010e86532ecb06b3685c0a69161f2627a8333068c7ce52140eef125b42b36bf27f74070d192ba8936aa587b9d398446c2261fb965280bd6bc56309184181df312ff673c4335afb8a9e2140a1ed9832d14cb05c8f77ea84f3f39d7516707ff801d070e9f425c138f06929f5fe233ae282b20085c6cfd5458c7d08c24966692af215c5693f9e09861c01d9b0f860dd4b582c44b4a73459741a15e0edb6e3022391b79d5caccf5386b635829f3208a8158ad8b7a396f6890d0f4cbfbc61a2624c9ff55d45ebde3ac5c5f07c9c043d015c249bf7b40262997f5c4e93716f5e381b544e4cbf76729fb813e98333642f7d68cb6caee49668824a79f2d59cf73363b3aab88b4a6640547e9beb87cd78b6e056197ff9871fa1dc4f38d05f21a6d156011761b90a605015d45a834c13ff17854037e72af3a17de04b9deefc24c59768c8a615231b6f3a0ab37d69ad4a1ab4318803c69a3423f70ef6ebae7c80be230a4c64ff3b7ee3c29c6eec97370868294bd185ce576781e8dbbb69138f9569e4c638650301017285f5bcc2f606b39222130456ce7c2608366f51cbbdbe36eaf3448d841d4021c2bccf17c6de26fd1a2a36fe4477d9a966e34cf035b9f1490b7cd0161cfe4a8afc343dc85119e5ced8c7757206d6bfbe673677dd099007e05d031b53fac544405a942f3654502ae43cf1f882d7c562841daa43f96e062437eee30a993c488ec4d501ebe4ae0019344c8d9ba892d2e1ae84c07e24e9ac59852a15be0425881742c7c83caa026da718596830ed3ede80bea26565d24a529fc04ba53f5efbe355b36451ba4d33d7f7b12e1a09081838682308bf2c5a3b7597870268591f34fac0691ca37901fea0afd16dadd5cd3e2c28d2f265a4fc4f027289278184b6806e95fced0d23912b9ac4f76225b96bfff99aa953e779976043dbed61175db8b9373b21343de3b2e8a367e32197cc198a8cdba743210cc7a42
971a10966a4c990088df071f1ecc96db33ec361bbe98c825ca3575b0f23a8294ea969cc8dbddab551a450655e488341c0541d8d189ad94b5fd67d052554cad0ffbc8d020a4110209bcde2e4bd6d461af522794470be1d268ff633f1317459eeeac3717e72bf9da0f4823dd4e2981c46655c5ae2dd213e329c5b151e9fa973396db3cf3d79e5362c6e63190c1fa936d95b98d81411aab1da95ecebae94d4c113fd49dc269eaf9033b6ec49c13201c14f7dcec4b29a146056800d4eff9a0c979f6a381de3f69f6aed2c5828cb82ded7db52ecd793b753205feec5a48f61b714c151aefb9944e254c43045a6c5e9c002c79f2bd42a17b713f23b5de9aa788f87e4266dc70ed569ef6e68ac621acfb6814f7869887fe266258bc82b29a70d391922a2713c31071bf73da0b4843b8e1f5a7f3f3f2aeaece946a819f00e34e0e2d98fc32580e79cc237647de6126a451102b1e8c5cc9bb2a3c01f0afdffa4005c60aa869011d4dd3eaa4a7bab937bfa7f82518150ae35fa9ce24bf829c197b792fc0994c7ebfb42a2f9a93f4c1892e4c25f662dd8248d67aa4ed9afa83ce9326dee5a20cfd879fafe9573b472817e74efc65e8ecf5f47199c9737332f634a05b3354cfe9340d12460f9846404e1b406c911db86c6678fb04a3317a645b1993e5e61bdcc0f822ee280bae9d84db191051a9958f22740cf0894555de3cf136f4cc8eb1c4ac44a8a9822f95a15fd6f80bd37252677a2cd10d703f9294855d169242679002d3b322656b4ab8eed68e404d79614fd4d91816c20a390341703e68e3ee241e41739418373d0fb9ba1fa6a0366b0cc0a2e9dbdd983188f8c0400dfb354b81f71817cd185526793fd68683952ca58763933e76fb7f1020df7f59bb35b63d41d14cd157a24fa3e4248b5cde6286519e8f437a086e9191b1faf82367faa4486978ba6f1810dbe877e5e33977dba474dbe70a3c4d4671b3f63e67e1db5281b1e0d0e0f1a66b1397e0e7726c4c374840a5fbbcbb98b6952fc211261def9275eec50e83729df2399756d6162cc2ca4e317ac25801e89b8f18974e47a6c3fdfc263181a689aab050f6866485b67fc445f037294f95592bd75c454a17df0a95a7e2a30c89a9bb2091102afe444d98002f36c65be3c1f79fb6665ae408e8744ebd1c399d0c95b20b21f06f7ec00f86554ccae8171ffc4698173464551795a77ce540ffe692450ed264e99b64dcd4deb6665c99d906b8b193ff1a1960d1fe4417660de1fe4f7df440b46effcc4ce95944bf2ab5ee1aa227d2cf5f513c191f6d5baa23d5f4f759e9404a0c1c86236e72d1bea4aed258bc652bd97ed8a226ad6e50a61918b3201406099938ba0d528d9bc6ceb36df2f7ba6c6538678a52db407d9272e32b13eadb152dd4b8ea3d38
4de54f57c33354c9123be7b8bc8a770ac5338c912c1539124449f627d105aba2102a4b0b67b0c65221cf18203e77f49027d0ab585b8bfc6861227b7752342921b1c00ee5a8742c089f201a190b19e41d3740e919a808e1ce8d3591a7beb851d81b8a55d61586651d229c1650f5ae4d2dca791c1b6d3cb2ba7059fcdb19dee84e1ad21ed8f12bf460be2dfd201a9fec9f0ba555af60262073d1910898bd32ff43a6cc844a9eceaf111208c959f48b03fd34393fa60ebcd35c973c9bc61a27bfc3f6e5c4b0ba005d1605542ce249fff70a03097330181444e4ce33c130517551acba9a693226e84a5edba5da80a3fb530bbc8bb5999739dee9c696a9ee1c7198f4cbe9635e007afe30cb77c1315a02429e226e3727146074c38e043c4770481cf4d7d321421ced4bc44cf359fab27bb5add0ebc1f02d1de646c157aa0a2fa053f76c5c17dc0e21271d3eca7ea2b4db478f23c052e106a734f93c5b3ca78de70c8c549dd411b5944ed9f18854b4b1ea1acf5a8780163a1fcf2104cb4c483fd17e8f759880a84c0e4cf5f8304d6e4389237afa8c93650b27e0319ac90f660782f92afab62e21fff102bb8d316eee157c2096ae69ea4cb3234c3894fbde89aa3e8cb18819f09ab8961d91d33901b0fb8234142d6692f6f38298534088a4c66b78760ac4cef690e6e67ef1c25bc8d010a0d12ce5af512bd2cf723bc65aa592ed8af781767b89cf66584553ea49cc8bca17efbf881a2bb424add154ff538c41bdccd485e625ea495c8ee50f6aca3a1bb285905295dcfe15b564a649af905cef87ab0c018facbcc04002f9903c8ee91de5e455eb4c35c1f0b1ebd65ed2c79fa3aa1a3ad8fdde6949c21bcb0489310021dd0ff77bd3a54e4e3b445547cec1ffd8883c38629b4493d0317093e90d51903c814885d75af75d2afd96792430eea0b65b981305cb684cbe871f4c5b5de566a86d2982b6ba8643116f50e988b228a460de65d9ac360736fb2b5052f51014142b0a9e2b01c36bd85182942ebf39943787b4b19f33effde6a4b78f137e41967cc89d4ee64e44d67ab1dc84dd30e65c51866217d0819ede95aeffafaa14642a6a8ce8ed24a8abdda1f5972db5f0bb9649440d298dc68e37ea3175044e099297e0bd5d62a9baa8be949ac6005a39105ab710042110c78a487ce7c87a2eb18341b7477b540fca6b0fbae9550aef51b759b9993e3485535e15799c473c5d71b1f42f1f460b9bf9467ac4548d5119444d721349a122314eb746e50915a4463efe65931bd505cd5883f0d6e4a2b4142da4c61326649baa5f679e8a4da39ead03b18235e148dc5bd8c6cd35c616bf1ae2f6f781baab1f4cf5f618ab074b774f2246830822a196c16053ff2a97bf9a1a04b83544abaa2c40cc2b3c3c578c00cad7
0dddd0ee284160987387de74bdc1198d083e814ceae60b4815bcc8677bc2999818615a872ebe40100a6c6bc0224c0195ff0b464e2c7ab6e828b977f5019e3cc6afd85cf39ad89fd1f73186d57d7b111e7ecba67edeec09dda5a22a968c9315df130d07863488ad7d72812f23a3d10a2d4295f6ed0e18a861c01bd621bf7090617e92b18e32cb91cf35741ecb3084b7f7b7bbaa2e358be52f2c197e1d6a8acc294c7c43f059178c1ac2ff2fc5ae567651350d4a87284f7fa8c7c31b9c2e8ce78d70205ffd1a869fc63c228d3d7973ff6f757abe1aa7c71f3e8624b8ed2c1dcd0643535eb15e73dea20e3ff98c8ddabdbe2fb6c346c4211849b993f27973dc6143a86228bfb2b5e2b94db481bc8d63c61b0e6115ec4b75799bb3fcc8bf5ffd7e0287a8221eb5388d05ec69fabee5ad152347ff9ddfc352838e139f791970773d32170afce03ba5d306042bc4e3a9c9d447ca0d93672bec897ff1b59f3a91e7f769f0cf7d1afe5af0266c48f486997722470555f59015a6e9b74a32019aab26bebe79b86bdf6511c434889ecf5d8766fd798f9ae4f10acdf4810add185d82345b0c469eed4f967a5ae662859127b22b1f0bad7a32196b6f941d3a2aba1641af14e074d19dae0e024b5defdf98064de910758b275eb0d39349a9aef6b2b9b9e9ca3d5a32d771872fa0c6ae9e43f39bbdfd36b9c017ac31be13f98bf47da6c367b92bfc8532bce28d6d9762fd56919c033378d0827ae8685a4ca04e3482fc1a3a023e103e2924cc4d5b897e23afcbb8f09ba3586ac455f95f4ffdb99e5fc93ce6aec0d673aa6b08de5926a19eb9a7de0ac787881bbe67a10690020006133c407293fb99695e491d26bd254354c22e08e3fdb9fc7b828f446c7538fd631d5ddc9992dde920bec9457ff2021bf0ca34f87beb6235f42e3606a48b183ea3c30dfa04ee94b11969a308ae876011826eac0a64ed655e84e136da48efd9d4682ca0e41dba225b19272b3212b98fd0dd1ee708aaf2c16202252fba04d8d9a960103d431b8e037b6d9b97aad6735fa563600edf95e8cee2bf5d0977538db048c7dc011582cbf4d7f94df5b39928f015538f76acd23db98b72cc785554523aea0af7eba88041df512957cea80ee6df20da80aaf0f76e850b4d5ad7fb780a37a34f695c8dbb96f1c03dc28cf6c07e803412d3c0cfea9e94e300334e8b6d94ecf53e0811238c0417399ba16309ab6fbfd99f7574c552911c05ab03dd301a47daed3fbe186bad39a262d8edbde91902aa96169eb53c5f7506cf14671e5faff0e6f3c33932c9374486e727858fe5c6a8bcfe80ac5672bf1f62cbb50ea9025a48cdeab0abfac0dc1edb00cd8fd3e92d19e042ab6e319561c26b1d5fae34b408a926941e4defc5655b193e1538db3e88e657
d0e15f5475e51b039e85c7ef373b7a9fde3d9aa569fb5965986049fce25a73449b953ab590071979a479fbb6b8f44f03c8a9831e09efbf4138dad207e5ae666ac169682300115e04907e090e27deb33992f89cb3b4e460f7a7c577bbdf749f2707ba42a8263496a56922cb7fa2d97a72753a98db9165de7083ddca4af6f640d1fa6430e47ff43c22f87206247343bb0d365a77d459edd065ea228127ce1a1c56fb535e109fb0ef6562d04459b2ee1b7f8ac7666eb3a44ceb809419d0cbe0c4ecfc274d4a3ee511e1060b3b7948c86bd3c9effb965d5429c099df9b14c9e0db340c7090ec8ee9356c8bc8a6b8b4315b9092051d69b9ec86715edfe305eda73d1188b8426bdaa49329fcbf916eb555e23aea4b57b9c674ecc0f3107ed0b7c41008a61d0b77af5f5dffaa015ddd623e33ea020c0f8f5007d2bbf76fbd94ef312bc27b77d69b730e4eb5e4686e2e4950ce7a2084ca361d93c54e0b0b4ca9eed6710f1a2dde8875b877c226d2d10b828e7e346aa2bac738d1dda914d2a8089eb012a9ce5359ff74561150f7202ccbf5016fcecfa22f1489631524a022332806f7e9daf39fb8b774d4d3490107c2a4d27546d7cb2270b5804d01c3371aa06a3d180f0b4df11d23a6545157c1eeaf1edf0a02f492d45a639aa6e9d50246b966a2eec5053d40bde13b502a2f86f384ebd94d6e4f69cbba9ad9a126e2a0eb6dd84d129e9c5d2cd5763d05420e283170e73be17bfc612941e1b6b035f20d1ccf010faf6188ef76068b908fe5519c772bcefbbabcb7ef717e9b906a62a0b00ec56bbc0fcd72f08f3ae1a257dc847874f420a42d73bea2fac9900baa9dc87b3cfd979fd4b20d754b0ec92b8afd19523a2c46f913febd7489a156ad0c69c26a0472db960cb7f73a020e202294f29ea25eb51de2d4f72a2b9b7f9d1a379492e98c9514e77b5b516f32b974f67ea7a33398467e33e125db8994913ec7721c66a31407fdd147f6275f768bb61f1288e6318610f2a471ad84e88230363edf13c7173da724656392e8c08cc53729080ced477ff76cac77e2985d38272e06caa793d60d14d5df6d051699deb8a51b7749b20e0115e648a87c8b44c8b26d7b8f6447097b0566ce388a30087c1050fb2475d8540813a0c48e902419e37524d0744b92d3f14081fac0a101232565bf45b70cd90474fb38bdd6f48c9a46c523524a921c4eaba009e37655e3529a15a16dec68fa4baf62c51b6c670d39fd0a769f4714311d0e63ec0c5701fef71e22c29614b3ce509ed448bfdde4648cd32b98747e93e2cc668f83aece521ac4306960bd04241c2d5e9bfec9b8160603733f3d015e7fa2527ee300bf6de1c2c9fb0dcc3611941dddf5c8129b27f09801169b0fb1913bb7748e7193f87550479d6b7ead333d8da7
e98feb892cf92b2ef450335bd4d2eb203c698b59d862ce9096c030eccc5885835f37fe2aef3184c3b75b2b9b3803b526bb32e0194d812eede8bfabb3abb84c66d8c6450b08e5009647749da4891b4e93e5dfc5de09b546cb2a80b9cf2701d1ca4d15bc5da86caeca4ab339b179d9e497db1450d4681d32da23c5f3b8c24a9f86ac1d0df434821823c47d8483ec86b011ef6a219cfa14b74c2064f029d636d8392494fe106979e179eb7ce234a4dba1551cf447309efa5ad1f70556ae08bc777714d586578effae2cca477e2ce61b227f5e2d5241e493d0b2379b621219faf8d8be44ea9ef4ebb8b6aad01440d0e9b10c964066e849128334566f5dc39724a8ca22d8d057cf32c7916ad2870c5eda332f87fb65e781b419a10fbe49630d47f1b115545309a5390e505702728ed43936c42d1885df2f9bb06cc834dd68d42175d9289a91879678ae06032bd324500e51f0ee40b14d1cf6d940a5e0850d3b202685963b8035e7ada7bcb80dc524d19188c2160f8539edf53d2fb0567ebd56a1a2f7c43bf7de5532a50b9afb27f43ea631ce59367a3274ab8607452f9811f4400dfde9cb049b2a475c8943f4f83e0600470b4f6ce177e2198c8fe569d2bae0a1d0b139211d741024b26f3a9916ea3c0b704b36c93275a4991dec8d91a541ce27b332183a4a1fc372dd34bd6b876cd1524e6600b7db242a799f20837376307110d7454e187b6c56d62a574e51e9e5f95e81e3ef5fa42c3518bb3c42ce4d49fb3395c42c2d44bd829594566f4a12e1ff8c4ecc5c34aad3f1a8721ce44d3b696863434948aa4770927401e4b213842605e41a3412a73158d51586e5311d5314de755737d4a0cafad4b8eba58b552ba767e4462ab4a3bf2b82d952bcbfb28cd2746b65ed92935abb4fafe2e7603317dadf8fdeeb6b46c97ee02e5727b8bb31ee910f86f45add2098d6a6b0fb2bcbc12e7b8ea5ea851984dacaad187134e5163013745e85e19947b2ef56735d9b461ed5ddda0e538b1239bde55ba16bed3b1dc453e4148d89fcc32288fbebd90b7e1daf6ad6ece865da477c8658075866b5c4994e8d0632391d8c6ebe0efe37f9c439582cb28824c2aad6548342eff83fda4d7d90cca7955c66b2ae0274ef207c2fa6fb7504ef4871a5df25bf001b8adf8dfb19dc19ab03fe960cbe6f44b93418ba8ab173c075405cc6d28000f22f6c7cfe4063e7aca85914e30ef32ceb78709e5134030fd91b142c2ad6f00ff43bd5e9804b6571aaa1d57777a059f080d47c6f38ecc9dd1d1f1a593107c91d9f24eb60f5e22b58ba06e7f032e4d66ba8fa89ae8eb539d0ed255409b0d4b0a3681f1dc4f5a519211c8efc83186a8847c9913756ab7d0614fc275320f1e8312e204e6043a11af78587f65a0cf95d55d7aa10bf
3b7bec83fe9ffadfd9f6c0b7d5e01213bf5a01142a4c25bbbd865480da06eeb7b7db6943d40b60b731d5857561556d87a5288ed6b575d80b60db8c0ef59c1cb3bb43c62a35da0ca1a5fdacc0f1d5b2bd59ddeb3f4411df1e5d813f78a5ca0eabafee8c7e1a0b55b8067c9f784d9910b8d22d8224b90d89c0ce85dd2c5c2882c213d7b8a3d07c512e572c8a9696e0e8e028ea45572196fd2c8185f6ba4077e87051020fc197b21b69774d6be94b6e50d7ea64ee11515ab61fc64e6f7ec85446e7c1033fdacedf6a627e3b29d8136f45ee98f445df5680b953daffd7b625d98afafcf8b590b67aed14f8b7c50af82c8387790ea496c9ebf781ce6e31347b5b9970ee98ea734ba7f8a1a1638fee252cb38afcfd435c9acba935b15dbbac6f3197f09d4a470d9b36f3a6f27cb73d4b7e840cdc0593074bb3a07881806e614767145003acf8c8336d6827b082a23bd71fcb7665e4b4351b5b86fe7167d89fd4a93480c8807658796dcb6edf799939dad35645b23e73ace82096c83a1f92c985c85ee9c963a9b4f9e6659d3f426e00ac8bd31d9cb649f6da7a5553b2f4303697ad1bd765759aa0ed9111a7c92963d4b6f09dc60c2d71b830725b8f6c932e917a0e122372a2cb1673408a6c97ffad46e71fe989cb3f377bb9906ae8c2528a6a0bdc4bd4c2394a9b165a79b9796d769dda30a4e5e214ecac12b45912b38e367e571f2816b5be12554f19934c0e95b94532d854d8100f26d26cc4686b82e5663efda19d69c39f1f8df964f52139ddf90f5a3f1acec7f5419b1d7b5bed7601a37fa6b26d98058c60cb6da1aa740d496e08fe23222321dafc8b2ef5dafe7207a57cd2fe009eff2518b1d9dde687a687139d184723b03f6120c1ffc62fc96f243971418a10003242b9ff8c297ef036d5f014622d5e377d18eeab535043d37fda41d58656c370bcc862527040d79650060b8c54388bd4cd1b2928f1c5aef361671ed44aa181b5fc7810481d8eecdb4947c02b126fa260e3f0c65446427135c9b7d5902ed16093a750188a454d594180b968e26c21fef0984fc1be54ca52bc141d99d9f181d0691e0970dd5a28c2882085db6cf7bfd0c5573c978f938214eb2d52c499bdf9a89b8409247df73b903ce48de5f7cd945ab6be939d157cb7ca6939054940b5dfdb32248ad51323d1742e31dce87abd534bada2b7bdf2bc45927c1696eb8fa6594199014f5cd7b07b3f67c1025435518bc2aec0836925d0e94ec05ef11cd5cac5360b2ec6718f9ef989848f0578cbb0070daffd728b0ef14a95bda563173c988a81d03993b69823404ac4d56dceabf1bbbee8147675140897dc12213ce461b48218ea733f121a1322f58d0223acd6a90de5c67c10f329b5685d17d3a89c22b94f778df6d4c9ef6bac8232
d22d3315e234d852ef2e610ab99fd4393da933e9cf51073b5f3d590b5ceb456b12096965598290a50749826a65e2f965cbccd3b7151e0e14489b8836cb3a5d389f42d5fb53e3677d14ac4b8bfa239f5d0412781adc3d0372c7a04aaa04b66bdc26315138da0306b387ae992e575d098e65bfb1e9939f9bd1599eb9f1f82a9e0152ae8d39c9231db5f1eec0e7eaa6816c96b0ad41fcc5c4ae38fd91fbc986fd9fbd489690e7a2bc5cb7ccb02da6bd99e6c4bc5dc5f40eb863eaae35ff9d9213c59e7885dcf6cfd59aabcede31ee3a5118747747387ba8eb269ee0e25b265a9bbc221a064b585ffcae30cf99f47f3db6b84d1aeec9c44692ceeacca71a272e55caf2be6a35f145ce28b5b3f9d210067fed63657d6a16d9f30aecc3cc5f3a9c87f5e30fa950b8b2f3a16e5103c90cdac1098c9774c89ac73d126a10aa36fca4943fcfcaa7de8733e414f4387818c27720107babbab2cfcb6c86ffb69dec906d96f52077a5f1973846206e50ffa7b1f6df1c5b5e05a8ce7db5e86171f8378a8e9bf06f240f0484720e268f08fb19a35b48481192d56cea354cdade638f090769410659706b660bd7d2831544d06f7ab99ba58e8301544426f345571c39366d420fde95da95210d4e1201ac76b987594253b8a026ca7983ec54ffa7fe1e28aceda66bae4c239cfebad4228802d6a445a1aae432b2c2cfc203d59ba2ac771a31c00c09e84f66b8b1ba4fef1780bbde37b9145d36b9e20c013a9ea0b1663cd9e27cf2298b0c97b024a5f456c500261e3fb73d72c5747cf0bfab168d0b86af86a554cff342966fa5246f810fd0301505b26cead8f37024e8c6fe6affabba731d4211afe60b6831f2ceeee91baf42a0a657358a98cf640f4eabdb3cdb38e37e45e6420086fc74e6a0b7c049c807f4050248f3cd11e2905119288950c2d5817c1677c8af8b015e1f351841f938bff6b7f702c8c391555b99dfacf06b19ef48c67f9d07a701f6a83ea0fcda3205235b0cfe8202f735266cfd4753bf0868b2fb77d29b640731fbe17f366b43f56cfa7cdf8c87d47dbdc9ff7154b9ab83618bfc80970b8f49a946c85688965923f7da71649d15176882ce3be480a20ed60960549c04fe806646b5eb7dfb9cf95742f7bdcf91bddaac1dcf48291fb62ecb5a15303dd6ce6c21ea72bef3f683df0df65ff43e0b5da22b0555fbb0e7093ac81df61b4d0c7de6f4f98dbdfadc2a82085e0e6dca0e3979bfb16e562d76e7616305b60d3dfdb70c14d9a1ff16a729cd7400846432279f68e97c7c7bfdd362cb901153e51dd56baa81ef7159738c19452ca6bed8546d9ab03cc12bc2ab252149eef44e3551c81be3f0a446e8a00d84d210cc31abc6b750172010c6b7af7d40ac2512c655285043a888e7115b46a0eb9c4913
d75e86cfa17e617272107598da589b774036cac697192c6f9e54fe53ecd1c4ed1c3d3348ecccb0340582ece994cfa7544e5cbaa41051ee4ccb5d6d762e812b7d5cf8b7e76d6300ba211c48265521fdab33cd858dabc97167a0bdb78f26637740407398b7fcb78b9d435bcafb3b379fe3e5411036f48703ec672e34b8e85ea1e163f8fdeb146610cd93620ccfbfc85ae4bfdfd1ecdddc5f3c8e0fc5630ec43bfbf6708028696dfa94200db11cbdfae810e3d55ca1f855a1d9b98377b54aeaed3156391614ff1415146cae45c3f4c0e3f377f90d7e60c28c34e958c66ac091a66494a9cb1dcedfa5cb6eaa38f062664f113c3491e28606088d11e446a8cc41070298927be4c68a85cae340ee34af8e008bd1e2516534aaa3883a6f986ca290ce83bd717eaddf95888e66455619cdacc38912ffd9a44b0b072709705ff5c70ddc556f31d9e6c2b99e59bfe0272a336ec7914da06f957f27c378a6c563acf94ef5a91fa747893f2383b1ab848778abce8dc0a2b42172a7fb91d3cd8312aa98d155b5d9f13be2e5545b3d9bb3050c2447aa007575ff9aec56517c977752a13b199d82e299b253e708a21c1dd27d6659c7d1a724642b142c013122f6dde657cec6db586553b72935463668fad9ae186996ef38fdf6fde5aa8c4a7f073521f3cc2bbd9e33c4928a6a4726ce3a560659a41293701dc275c39eec03d57a359ea48ebede800e9208f4dbfb377293efad76fd92a7e6dedf8a530149879a8e50627059046c0c90e886e1790d5354e48fd8dc621596c55063784bb35c34ad25eaa8cb23a361091d4971ef7bad1cd83e23d9e40ffeb5a70bd0eeab408b4a427f6304b2ce806fdb8ed7b7e8c2102f66bda076abba771cdba1d0f5d7d456779ce8bef20929972e7417e6579a74d50245e5cbed2f04bfc519ec77c6234fb8c257515e29d1edb96ba6e551487d36a0cded60da9fa7a843f62599a1f6114afbe43b72ded08043b37e4dcef5a7802de1596ab457049a003aec15d0fa943aa0578db81492775c046217a204204ade6b158bc7c4b7bc33e771e49b9ceb385f3b6d11acd8d1c209d4b571b90174231bdbf9e4aff370b46a1e80398d19fb0399980852005063035090e408d1296d28e85dc58e90c7b68196b54faed1a22c5cc356e99822825049239b3002b946218e67841c61cb008e59c891481218da34708d17d6896fce941b7b3084a481c226827fdfe19e4c7455a6a2cedf205affaa821833b63855eb5964b074d4b475196abf9c592d54bd98cbff0d925fb36333b5e7d4fc87ba654cee389c47ac9cdf1673898e71ec8b23520386b0b0f6f9b807204d85d772383ab5d24526d6c57c87c5e2d566b58fc8f8058b38064153ab6c3a50f5f06ea4764ab9d606f6377e7a2c7412efecb71a4cb6
34cce3a63bf5359e8814ed77ddc6b4a40fc204d9af66876d06ecd2e40f9366c1185eaf88108b40b6f0f57f5ae175f5d46beb5036134f474cafbe8f4ef2260df5ba2da17bacd1acbbcd64ddc03746ba3e928cd0afcfa2939a8d6f11924528922d9f10ef8a312f69d8dabd8cc20de9b7fe58aaea75af2de6dc862940564c61c19754187341caa90ec381760e1c4810d8e771335ea5cb1a5c5e8e558af3069660d79e1ad66e30ac224cfab91a1bad9a4e9ea244b7aa1544ab11d44a25787bf9532f5c319e6e34f803929fa09cac811af9740548b28ed6dc4cdfac9616072f317cb0dbe1118488ab770d8576f090e58f5dfb6288f42afdfda46ed59277f7e06823e5ea03e2d414a79f454d86db3e322e1350ea893dec7099b1ecf696a4a3fb89d359fe29ab620529dc6a7dc5d632b696de9f333f506f84bca9fd7f66c936398123c12073a51e335ef3a0bf6b80055a551bd036177b7838aa07680e7f92556853232ef151caad7d7b09b9c7dc04925a835e1a4683cac902eeea199831cc929753e8c3f00114ed86bfa35e9ee4371be2bf58d159b93fbb24933a7815bb02712340282141d259a2deb9a1788ddfe75b85568bebf1452dbc3ee892c8d824cf01005c143a6cea5ccdd9a8c70e9e1b8e832bde53a109fd498af345eebeea6e5334e304cb584370ae3417a4a465ef262bbd64c2e5f6696a59991b4e9bb50a7381be46d0adc35e67276e1bcf3d34b781f788bdc0e3f6f5f21c246246eeeb28ffe8e57dd2fd09088bb2771e9c39b56726b036ee0f8fad7fa03ab203db69d4cc332b60d74e967f245f20b3d53774b795ce491603ef7af90021219ab199480c9edcafce4ff364a1e88f97350299bca866791bf4a61fa014d7d2dee73f62d8a4dd890abc2b6e781f946316a65aaa83dd774b942ff8badc448d7cfa5e9bbcd6d30af77496d69fb684e6c5a06e8e90815da8943277b2b9c05d112e3b70393cb8e0980edbfe6bd772feadb2ad553ac867b595959b3f078d0e89183854b41b4770db8a79649c5e4340bb205fb32822d4115434459be3101fb6ca248d6254149a414e482b6f602931c9e3a8ad09bd5db8def3c766799423d12e9d3af353b1861f9dccc899694c256e7eb2403edcd4c3b1dbac2a5677b44ce17dcd2995d8223608a7b6ea41bade82b2b195675775f2610ebce08eb81bc4ea253467a9f76e2701555266147b79f031ac9418b8b60433299bb850f4e463b446136b33fb58fc684de519142b477e32d11c3a41041edba421c1620f43a6813020d1d33cda7df23d2a8c24b7bd7772149b8bcb3fb28e53c0195696a648c0a7272ccdc2c78b3b0bd52b61dace9f79755df413efcfbb1ff1d5491b5010dd2dc6241c7c9b541ad5cb251b0e8e0c432de389aed2db2d339a84afc67594e4
60dda83c019fee3e1eb85591d0f036a09addb34b852a7bf79c21e5592fe0cdf1c2420e73293a2832b2ac6e734834cdfaada0265071f96aee6b10662747f79a4f96f51c8a6049afcc43da5b766dea3c42378dd4994817ad9ba561fbea39dc16f8f2766bc8f4532ceef2fb54e879c403e6083b757a6c6055a22c035cdb49ecba97c448263dcc33f509c5c0cbc2e65430b8f32779ba737bd292d77fb5797989090b7e126f67c321f23d671d5b80911e32ca2cdb23bc79596c9b33e5c2941eeb7b7931687f94a159b72a93142774147ddbc71f6ae38ff359bacfebd6aa33676c46703790243089ffc240a7649d4e36216948e3e9ddbb71541b022b640f90253064f0f27276cb93193b54466fe1df047752677dc29364f4bd97793a6735752f97750206c10f60316b3f8d9b9466412e997247ce9d9bc0e8754ef754110736052404af720c7f1fdd7017150160b35b6a2640b906d58475de9060c4b7bc314722e967369ac9a34cda947e542d7921a81a8d5083934e92af91841256e9b82930ca87ee8640c7ae66c5a3c3fdab86dcda30e079976d22a3cf56f9c156559828b8f7334bab35b654f018f331dca72dbbde50541b951d543284d49d3b96f1f2da217632c5e3660283d27904a56e4c83b4cac7675134b8745cdefa91c16905e16d958b0dcb29b5d52020ed7de8cab9dd8ddbe2cc6c44ed652193041e66b8ad5b86c6de0b905daaf17ff243ef2b21991da5b8a18c47f5835c405b13017d9a23fbf126c53bb2777a6d6681715da504698dbdb9d35500a9a11a4d1a41e1890c5aa928f33268c21d8f46a51abdc740aa3923f6b6e1b689af06a5cd1b983ab6111d9f89197e96accd56b7923fdcabc93f044a05902afbe293e4e17bceea5769574fc49020f2ada228b29f86e86439101eec7d551a882c8260d2798a5464341fc08b03f2a1e8d1420b00aeb0629742061b9f53ca9aafe67f06fd0e596b2f47f6197de46646ab70adbd7b200b375934a19280eb3be89c573b2b44ec0c4239f54bb158f23c06b65dd3c5c5d16754e0bc66e647409c38bad393e301cb6c511d3d4194c815b6ee562e0b3e5b1eb54782aa91a9b5957e61c9bbf37f51605e858549b4f1858455860c5ebf685d8f1c864f881e4b7e4b73cf866beeff7232c6d5c08c9b89ec81a9b0337545f8115c4fe44dd6f6147e6cb7788f7e3c1feba373da09f42d731522c32785653d33ff195bfd89f0c06892919b3a90b2ba324d0cadae87dc7d2f795e9a33abd500cdb3ed36975add5b149376a0888e6995023bb5fa7fcbfe83556309b452a436aee6e7d7da5242f03c43630c3bff362adec8ec4974e330567e81311d2732d20ec8e01c497b5fc6ac43f50208e9e11094e2b44a07dbb975e43f0eb0dcf8e9ae67391d3770db07f06166c5
85915d12f618a50f2c952ca38edf636caa511bdc0e70cf567fd7ab6d1280e5650f6dd4fb2e9951a012d5e2fa22a90e7e0d29637d00a0226cb13c1a90c6a7989063d314c91a4da4c65ac011231320ad2fc335fe7b728c139e58682a8aa4d296c0257234e89a8e6890ef8d982c244fb8f7b391167fa3bd52cd13704de96aab9589f7ce03532fa1b23618fd29d5d96a7ec7c43f2db64562ec1213e364f50a021c954c24f64b84332b38aff3d5bf23a2be97d23f7740a072d0e248993143a9d28afb08a577eabced00f638d953ddecd6bc8c54deae66d5d9d61f3ba20c15961270ac5e331437306200e89a2c1ba8b4748280d8092488258e7a006d2e5c47a83e02b71655ad8dc8ce9d7ae0302ca9ce3ad281fc83f71efb0f38afc85f9a02c024ea521b09aa7cb9fa6ec18b352d069adb122513b6a5c976004fa08ac546aaacad730c128bb2233b504640eb977697238304a8867e07ad5452827ce5c57f688d69f8b637231bbba36b435b4c2eb54122e20a256b316fe0b9f09ba49c6e890f1c125b3b0a54055fadf566b4116bda77d1c1f54f6179470dbc0f18a750471e97b4df70e1f7c8d65ddc7724ecde9cd8d540ecec35ea698f3234ae9cf110fbbcd70ae1e8c085abea9b8eecc97783ea2e645e696d4d62c47a714fea6c527ce28b679ab060070899611feea7a2dbea36a5e5cb0db1cc0d1954377b0b1a80063c510ef3435f8ab3c0611712212a0d3dc09efef11a5c9b2b6427913b993a6429e5dfa64530c7a064292300ac582663714d24541cbafa772c2ecfd224650ec4149d7f095af599fb9abe4ed606a3c9a01fd6fdfb8a39152dad05d896a3aa98b9b6be9484de85057d8f0a9fd38382f0a1b18ab6fe8e5feb15248b2852d4bcc5381d228f3fb833720ba581518d3ed51b7a7cbe1f1c883de8c0deaa92f34c54c14dccb5a333f84fb444fd93d9219be03045ea966a61a6fa74690c9dedfeea1ae26cc89c1e2352f9af665672f5c7ed01f12b6ae5a71563b2416fc1498d604aa8bfbefd0210eb2fa619d365dc507c15ff8a80e63e106902633f6625cb7f3a16aae1f77c8aa40d1dbe2f239219e19b2e238754b267c5c32b410592fd969ec8638dcc6c878f681999709a177ebfc6380a1edb6d88b18a8aad25cf4c1afca087d2c7297af4135394ebefb8d48785f045e894039845f0403969c4de08195ba4a280e664caa381bd709d7c9eec0f1bfe41f1569f31c57f98e3a51a1edc09c08334f181dfb582cb2c3abbf497508112620ec29490991316dce02458f3721ae0d09ac121586d5f3a83432bfd77c5487cc4cd6879f7de8737f10ca55ad7660dc66cb1060500cf5e9c0af985918520d774c51dbf55b310928707cde6b80ed4c2d5280ce9427bc3ce0778bf9286567bc07b784fe3621881
92180b0c726f73981741cae69be102d6fa51ded9d1d07d8f1c6ccf34e1e8041d3285d7a2e1ffab2fd909b96cad746c8c9583cbd611f00e4ed72c2d9b11db8c8f8a185b507299d124f35e17047e759729e284ca3f5c0706c7a4aea95cb944c0279106b33fb36c559a9cebb9011ec6532821684fcca72e59f709d986f69a8bce0c1567e8a2c3c239bd6f4688babbb14301ba31653cc4665540195d04a7b97adb80a8301807a2f71f3b296586ed84259dbecb51543e2d47814a6163f25c4250694560c0f6bd13a0d14137949bd3942aabee89af46cd1b044fae3eb930a1bfce61e7c0612211d38ec068089a90c4e8409c4a0ec5aad74c259357d9f6da5777d6d210cdf9fb64ab98f6efaeace77f23a73a5c5a333680c69e87460f55b1bec27fbc0173c3a491c1b82fa62a90399d7154a4540e96a7054dc34e6413ceb9e294344f2acefc9107306f49da2204f3ea2d3a38f8e9ff0a88eb2807a00c2c0b9e01cd1a95bcc825572be38f33c77fc2e9b82af06e15c3b5fbc0c4c343e50dafd03180f1975357ed2d1eedf46a947a2a4209051ed388a54477abcb3a4a36808e4e3aa725822689f861e2deba71abdcabc30af4fe2e644b11f8eb3dba80cf7b3fbf9eb7f5d378c42397cde37b33f4029548b6dbe0e9c0c87dc3bf8d516fd20b510be907bd560ba1e60f4e36a8ac3d5b34218ceac208dd732706030a712d38de4857358e94ea547688de11de94de4247fc9e50018e1d93f53e29b360673089e92c250453f0ff13e6e117ce94250076ca5e4efcb5c9f30a6fc123f470ece1f640ba2bd4cebdc6856e6e9d0a2a4d51c48f12fafef263ea83dd64fd575692ce6caf6536120393b349436cf782839950db0366b2f935f509e45ced95961f6537c461daed9f8c1d1ccfca06965ce7b578a8fca69166d700e7c5b8646b9f6cd92d88856ecb9bc70559e86dce5f8d1db1707d3cfcbff2bb7b49b189aea939f994061ae816d5016989eb581451798fb22dce881832df99e472ed1b22339b5c4d3a2f7e904702d03ac3685ab7a8d354610e846c07074af671040f9aa72f533f064a27118cb01a46817dc3997988c4a18ae0e0ead047ad1e32825f0e6393951bab272e41d94bbc47f4657b8fb65c1bed5c6cf73cbf3479c0bfcc5196af8bc3b99fb18dfdde33f2c438cf00c29e0a3481010f7348aee0894d1cd0223956ad0ad94fc70469af646c66ad124f1f7f8f98e7d49fb16df47c538e2c08e1264f4e404edddfa5cbdd5efb24b8f39164bdee556930951e1ae491608eeec5dd8a644cab8f60cb51a3ba144d1189b2a4778da5a7890888ff7d67f41195feb8ac09071403687776963000a253b3d29adfc97ab7ba383fff162b0e612b4e2c8f29e9249b7da9aa24c45a051ad1a860a7b440a0e32e579920d6
8c4220f7415caf8171043b87390baf3faba39fdd606567b6be277d6d2225ead573ac7b662d8f8350d2accd5efaedf0a3892379e9c33752a908c4e8353c83350c3e9901fe74bfa688d776ff6022a5212451fc40883182f03a97ffc776fff4cd9255837c86e90fbf58070b77c5ef94c8c7be4d610722aaca275ee4b9d099f2516706dd3d2e4dd1b790c35a703909fabb2c0d4e405bfee8d795e0a4cc7e64f5adc7a63d5164b195dd2eb96f64dc501916dfb83db3c7cc6583e4ba71e1c042a9fd7e749279c282ed1667f93de0921052cbb7f540f0b3cebba625084b8fafb1cbe43a377efa6e9d423dfe1ec135b5989ce33b5a35454a337e3ab1a2894f531ffd41b758fa33ed4d81da600084666b7d308928d39d57a4fc5f19959928e2b03473eca7f85e8ac4813ae0084067d9777a015715f5bd52a5e6bdeb47d89c194a01037ecbfd749c81dc6f3882b2b146770fc2c6f4696bd3367de7a344d6b2cff7087159176bf8daf3f19b39304b7e1158eb406b05bc032a3102ca08ef31b0c76c7537b60e8617ecfe1cca51127c3000d092bb871f520f027cf9e3ecc7c309bec05243c83566866b8463eaba69ee315c97011a1faad7fe85634d8a02e98c60148e0290cbc5f129bf4a4fb125ee7f73c0a1543d799cf9b78f507d0a2d243b40c574dad05cbb562a5265c7e9b222c49a66619b736447f0804bacdd141456051e51d00752c417a1e43e963b81632b0d2f55d63814f441cb13f7b4de940618773c8e8793a46ad675042bf70a1a855826f35c3ca70b87f294b21b0f7f8fe518ff659654cd88a644ba0122bb7310da8292d7e248a5b6333f2bf518e185230d8c0eb552c0863c912cecd8c0b09e5dd12755956d655dca984ff8f58f27a09abfe2dbd04228149e4bb651dcb3e5c20995177c4e041db0e5987f05cf5373241f8589357630789f4beecf54990c575ee19e44d102cbe196e9b216598534bcbba914e5b9a9db58746d31183641842e627e632ae64b68ea00bf1028b28c72692b0080e7bfdf75ebe1ad250d982afb13e0eff041ae6dea39bd81c73dff596ae04e29ab13e510524e722e32a372b94b160c62ec1874dbc6f6b78dc6060c3ee2f621c8bbfedea0307578eb17529944596d52dc9cf3ce36a23489f1f6ac8e9f875640c87277067d226467554a246446ecc26839c062f167504a18f84bc277aa36c614456cd8e0f3ae3950793b9d0338b756b275b999be099882319d49996d670bccab251c316d07a258291731841c8d4934f5e24f518e625872d38a50f60de3cf9208164a18666c6446c01e07109a3b0c069a260c15fe25c92ac258f35cce04beadd571084fe6cd7b69ff6e3060a69543e3c3487b3624989bdcfafd05946e9358c2e1ed74ecee9559bdc96999ef308348d3d30b3b4b
ed56618d2ab85265575fbf57cf04e9727ebd4047b619e820f7392f794cbae73820f1a4dddb39beef22cfaa43e0bea8b69edeacac9f1ec94963f97e847037729dc41087632b491a497abf2cce87d3c8bfc1e1839ed3ca39aa07c38bdd0a716082ed14b4215107785393c572f2681b960944a5bd236976dd64033ef413be6096e44535e5f80f82388353d12094201cafafa91b5376c85949a3887b43b3230f11019f070f5ac8721f5ee403a431d2335d61303392077d836519df7e9ef24f934e1cb280f3317024d29086fe92be30717e9efba8d6eb07c41fd3a6dea96187de1d2ffdb63b04d48f67772b11a6cb34481ef41f990599e1e86752375526784a3c1f4d97b331c1fe66e325978ac95fcadee4000884a771d48dc3ce951a2c9e2f40eee6419948a0941d2743a3c5fb951c0a6770d012693dda2aa872fb47f22567cae362b4828dba8a8347fdcaf136574553e7c3b6794d13d19d2244139fb9732c6ddced4d823e4cf055d3d8c1f73915e837a6e0149255ca89dfbc9ba0e2bd03714a494b0877276524de61b26bac8cd51f55880bcce5289638aa7ec1bf01ff981c5a02bf6268aae8de093187c876595bdddbe1d85efae6bf8f0d6bdb8f1ca6a04489516d637c497ff04eb10b98231b11ae7b841709efb0c84fcfb9b45b50f9090bf4330d72adac441dcbeba3a05922cfa9661cda0c0213044a8f76f71e401800d733fc496316faa2ed4300e38e5d06aba03c22a60e319e59433fc794cf41555549896b7375cfed93906240286b97a608db0049c46d159d1af37c8bcde7f21d3595ad168f9e0e1a4808d3594d66b7abc82f1a6b26a1020d41c3c514e0f74c90e6e8f90923f97b1d1c68e2d5014b4c39e1f141481afe00ef0d661d9e7b7b406a05b27cf1c199b21cc989ecc0f11283d59dbefa13b3b3e42f316cb65b9fe685b3990a502e998c208bcda8bb6e2b005bb3f41b0fdc981949b0333280dd8154f4a58055e287ce4692ce17f75fe4e4363a687768368ff79d60856a1426546d91d501cb68528ad4bdba7c444cdd66926da18092a8eb2ab989029a03e8294bd288b07716737a010385f3788777be8fcc15a8a003f27eb855f7807b78948ee62546c22e2a9d9e986e011e942969c9561c6d3bed28b5f1ea2ed708af3a4b9d8442527d683f2af2ff0f19f5cdddd23702dac08cd3604ce8850c24134eaa883cb39a9d6f3eb2e0975201c806f7c8b237abad30f6261704960b0c859ad26d696e5ea9ec129890ae7b90110902c4ed91aa29563ee9b07e73534f2a48e1a48881cef1bf1a597006b778cb1861f364749880806355d95898cf1a622e5d323452b576741db3cd4ea3c3502fab211755a686cac6ed4479c67636b331c81e84072f6c510e9f42cd50bdf80c1bcb5fbe3a7bb1715afb
f89443c08bbb2680588ae67a442e16d4677e345b0509a61d30e5f93414f4b535ecf4797f4ac1b9f2c66d13963eb36bd789c6fc90e79c9021e057df0b6a28c2febef82ca6247a6fac2ef981f2cb13e4ebd5273ff0155a173c85b7525a0401d00a5294f11e295b9d9230c20aee4cc8e17135e5a6484eb43a9a2dff0e9a776ca3bbc0116b24a1230e20a21ad442c2c0e9cf23227d9c74a87485e223d01bcebb5584eccd173f572f00c03187bf0b3a6f01da929040165ddf2da37cc04baab1ab65dea89e4ec45d18c78f6feecb33be606e3c724d914ebb81d9b2e4b21657a0c2d80e0ce3d66f9b5e2a090804a17c487107aac548c011379e2c14fa63c08dc42f95d673da6fbb601f56764bc54741ae4c99a753df9e472768ddac6e05de8de89a6450eacfd96b1f86658e733f5f9c3fb0a48babc6a474b17d518101cacc59e42dd0d22eb29fb9c9f281861dc9e38d2162b2ddad5af5a4b0882f7fb78bc431ad08eece35fa3d6fdb82b01630edd437613fd7200ffd262fa3c30daa5d9eb6746d421cf00b4633a3d869f33794a0144f0dd84a1dff5c0622ca345b0a2938040e70be33ed44c4f43c4addbe087942343fb326450946880b253a96e9691cc2dc6f1d67164887f36be09464cf65b571c123880b400fb6992b533e1f54f939da2f00d4ef68da75ad8f13d4b132e72ecf97752d3a6a46dfe7a1a17e533c0554fbd4296fed20664789062779a328ac65173ec7ded581d4710fda8d2aaf6e8a4a3f302722859caeeccf30f93218352641c32ba67dffd90838e36a735fe319b9b3237a9d3a1aaa332131db6ebc98e051d02d9593e1fd831a457465f2f01c7ff18f0f4819a6527c270fb83a6bcb0478e1bf2287dc1c43c61cf8bc241afd41841373e564d81f1c4ba420a6e71b068bfa6c501ed4d9e9d2be32e6821248821afb7a41a9f9152254ecb53c15cb2b453db2624ea39c534318f8d82227046c4c221d8c3c72ef47928ac3574f1df75d8b9e706df6d71208cb1f1e2b02daf03b5c40d0bcecaedd01961f1c529c76e87eda4361f14790ab7394804094ffe72a12f43514761fb8626b6efa2038d208beae9b3741e2f7f89199ce19521beaf5dfab65cd1a996d9235cb0c2d1a317270c3066a5a7e10c5c07a8517de954dad4dd801eda0c56201bdb05d5dd2ab9d5c714dd2c681ad80cde51ce503056defda8916a91b6223312fce40a2efdfa673af7153d26dd00d0f8f9548d864718e37ae77792f0b2f1ebd15536e2eab78611f5731947debe7f2865bbc12ed2df4c6b979a6eec953cafded230dcee32757228e93b7464ff4d70ce26dbe9f36f063584a5bfe611eac3aada03446a496d66353b62da3732b94e999699e123f04cdc52e7435e27f531422ecac8f975e050e04e402a8b044d5c138d43b
dbc529c2bd33cc5d289dec0267a15834e25f8186fff7ff7b28b666d89a9c147e91ff11954bd7ad58dd0a9b64d8d62cef3b1338c793fefef81864607cefbf42345e1929170aecf64d76429d8b9c5d46d4f810b44177c9b49ce51084118516150c6eee2313f668ddaa7bc95ce7427cb7876282ad0e5dd8c7ea32272866dace65fbfd784964de578b9ad3a4cfad36202f0c4a9e21a20817764b4f8d51716c1a53da492be3eeec89308c0f282c9dda08d451b0bec08925414d7de85baf1a25b2df1e064841b8e3b1a54bbbbd28ac2a472ca53b3a562de916208b2e5fd4e266b4e5c871303538ded890008d06eadbb72eee48acd9202c8d842c6b8058f2869182b352f1eda0744a70a84206643f81f3a3ff88cd8fa20c673f2d556cb5bc4c2f8b73cab62f5e95b1c86115eafb0798ec26b69b9143556c28d601d3df060a7f66d7a04f86d643bcb5b2203461b45468c410d0d8ac00a4dfb7f152fe19b7249e96f7e95f26b3171de6dcbd60f948ecb02940eaf5ef7b0f174a4c9c6369aa5acddbdcfcfc7d10dbbd2bb2a37ec89f0a3678f0e13afbba2e93d6ca70262c61be1984cddba702a9b03249ba78f10e42238147473d46c4d83f6267e3b9465feb61709cceafd55b2e40d0f7f6dec8872bc3ce5d76bd95c74bca04acd83553fd8ef3de43c7ab75c563b92b4d1d4574ff1bfc30ca7cff34990827a415544dc3aee8dd4fa829fdc73a90e417be10d0f3b4940dfeaa1b63e342124a549632774e1f8b4ea8551ca0c5db23af8f9d4904b1c856fb1af5b1765e6e10c50f501cec51251ba43065460c4c28c62c6e78c608020d05908569069e923f62f4df9968c5e8cc85ddd1a24b3dfa5bcfda1c63f5c6a5586bcb442edb8477aed80beb98f41199bbbc651faf831da597952b2dcf9827fdcfd7ab73501c12d48290cfa42d54a9feeec882aac8be93cefb68b7df376f08ca181245c161f80f20aa18ed333545a7112d80bd13f7288117ba293e2dbdb89af2a360e95bf9104cf752a5ac0332af003fe04c415958454fd420ae8af4a590a891295c916a1b5ca1575fc907f8ab95b1f3475dbc483f4c4633817c248f7faf20f3e0179497d771d10f982113a7384c0da912e5208c14b3b1b747a43db2bc08f902eb9f0e8ac70ff416041e8f34b463ba0d63ae6e80d7835f3befc3e70f9df6c5384c82313ea953d482c3dc26e75f8e73f6b993b44f20b829b8a60cba964175088d874e762dee989a97e071d36bd3fe1054644594db29ad2dad2daf6cc9e98ce72f6b777246fbff8423431322d63dcc6746b32230ddab0e24699c3e729e81f630689f5683f987144eeba2c4767b615dd1027faa69ec220499be0218e66f0c7b9c97fc10716c60c47ddf165d4dee1157e93dd49568d1f930b9a86d02f7b30ef4fc98
ae9fdcf2b0e38a40b6f10741c5ec1303c6d324e178b4d502328f6b4702413f29466fc7774d3a64a0770b39caec7e594e37a6a10f925e3a000c50da50d4fd56b24327b2ad8d9ba29aeb4cc5ada1cac2850c0cfbae38774dc3ba2e27f2ed71abc582da19b0bc57a6281241a548895f81ed1180a2b24499a02b67bea809269ac68b0830db4fb8be4bfcc87e278142f189c71069a48e3924e8ab44e32d2f8eef816b59591175c426a1ef5c7959bc219f895a80640512324fb81b8732cdcdde03ff0a6af260ea81f5379f7badac712b9822bd66f749473fd7b863f3cf1f04d8bb9607e471c0751eac01fc8c93bfeb3325c38b5dd370977fa1633fcc790722693b779ca576136e7423e4e5979bfd8e14905f9b5357af7693cc4d8c5a84bc0649ee9241f3814513b77f950bd3728d2377e543349a1a90d78e282350b331bf9de827780a6d63674993168a8d8a1fa759cda1e6c0afe0be05e14cda543f5256fae8c1dbedfddd1e5ad8662a35f9baaf8858965084f64d6fde2d48008c0d949c02a1e01bfac4ef2d96d83e45c4e72dc00ee043db90d055f918a1c0dae749391fd432e898f19629229829e56380ccdc208b49bcdb6606383ba481bc4be2187af221d0f8579148dd66dd68041bf92e3ff078df9c5df6164129ac31e21238169b163410e2a98163a1523cf3cc1cecfbd5ca7684ab88025e11abc05fe8c52ff68441efff3714e54d9e0417a1425c5f4f1d0e0dfa1868863ca918fec1f5a8e0b21f0ae749fb9295f85c25a4e289d8c0ac32d291f36e0e40ac6d92a227b2b70b8435fecb064531b84263e9dcbf45cdef9cb147f8a87608772e51b4ba2d86978e4102291c858de0eb0a51068d2dbf2ec3298bb593906f83b36484a9ef0019b156abc13a7cf84f7b36199315d609bbca4820beacbf30395b560809ea6acb24e546da890080cee950df19042d6f58acf0d3d0bfb51f02a8e54f76205ff34e670aa75199fc8b4e9765864d8f5601b51f3dc06c9734b322ac4bb5fc4f7059f3ed147169201209b4ea02cf358b5b0ddd6c9890096325e581a5db36dc3225a00ca4be484d58efbb25e3fe537e96e916f0cfca18b8b48bb3f6d9e9243bde52698027d1ae1c050722cca64043a77b68a9b0221b1e02a1eae592186e858d433861679045e6db6f68d3c3dac81a0eb4cf1033329bfb294e4d0d823857c6bc88770f11fb0d1004a410fcd26a5411acb8d85a0c3c188d3ed77c13f7c65c0afb93429da02d6b082c766a46165b91b8a44573e68cd4617bbe673528cd09399890af88c50ff54bfebdcfeda611a7cc5f48f0a67a66ffa9e812e8d9c110309ee9c8a11c9694317b00abb81279ec2f5dd5f52c0f17ea50b92253da2d62318daac6fce4f183302fd9241c03c4cf599d8689bc6afa7257b6e445
ed97879daf3ea4fc54cbe441b39838a4449747d1f6c1b5263aa19e4288007888b795549a9f8f68def34ec583a3f88e1c0b9a9b5204a6caab1af4e0207024276fbc31a015e9c86d723dd032d5a57f4064823ab5afedc2e461fb8bbd78402ad3b9d9cbc01afb755b63aa9b0685fc540a24d669d3279fed66609863606aa934fbf0ff767a5586dcbf9e5c0f947ca68d524e36989ed2fd504ad21a3164b7a61bb64a2e761cd81d3524fa7c41915d1620cfa5f9c583be3e1ac14c0c82635c755b954e9ab017a25d35d275b8848051fa154620b5e9db540a9baf6f034b0eed0d88a423a41da50d769467bff952541dcddf5d8fa8b4b4f97fa4634fa1118712eae5c2fcbd9d6762ad60f909f087e4ecf7a97a1d2e885e720ca8bbecb04500277ce1ae37882d034de86b3e6870377889b4a82e83b29b05438b6b2a3811403da177744347832ed885bdb25cbe37ee9d092a2fd0df58e064fb42969ff20c39e915fb4af981a28d032350d3c23f8259a5d7bcdb7544f7d6a815bb0f822e720cf6087d0dc5627b4d2f01d074ef50d6fb5f17794bfd07c446bfa367376aa44b801eb7e30bc5107fccc546c16c52b06e76c4830ab2f8daba95bb0f3a6a5991c267608b0e37a66099e2dc6f24f75b9fb4b3edb65d3a4d9850d487323da40e7cf87da2a7580f0b7da764eb92b9a3053e7e55f0d6f45ceb0333e8050ea6f8b7954bc22075fd3b619a6b09594a0be2aa94da2fadc9cf8e86bd4845a1ae32e571d589e4f59b3edcbcb106dadd32c21c8c476261b7396c924317ed821f4b414d83777a4a5d0db38fdafe99febdf68376631289ba334bf7da9d19ad3999e41348da8bc95b294beaef0e933914029e5e7b8f1ec3086f0dd1f8303f8f6620bbd6c5bae29356e61a1b2b8c8f368547373f4205c3c989baa78e1191da15c4e48c5e98b0d94164613c248516aad49b172d6d0e7afe12d81358cfc0359719e12fb76f36c9629419071511329e0fb9fe544cbec302eb6b7f73fa226f7fd7c662cd248e8cc46a3bd640b9c9677a77947f545cc44a054192a1f7641729f9eed9e9188abe61fabc1e8aa984dfd779d6b0b93f78a089d8187634315aaffabb80e19a1cb8cd15d160937ec91c1097ebf119a296b07485eaf9885760f5deb6a389f534b2679322156b7444a4e8bb34f4ec203156495336766009bec24c2b26acae5894f30025b7a9d28d344f23e274e36071e125121471b585dd1bf65f6b19acec147a948be300cc42b07460aa6bb1a29233fd085fdfe7e15a442c1e6f1af8c9739c54709b70683649e7363fd6bd9d00bbe7846c948ea9eb072eee4fac07a2efee6d7392062ed28643e6c56ee1d021277f08921fe59ee61e07ccf28e5d87a7a6aa514a69196669e600fddc0d51f6da190ecb56a2387fc21b7c
fddaa01d4f4f9b0cbb3bcba19fc57439af6aa4bfe9cd2da0fe1d6287f7558ba05adfcb2896e877daaa1635b56cff8e3071da2ce3adf307226a7e2c88f33cf485a9eecbf27b4e986ddfa67418534a8f116a93682f414dff1c458c4f1ba3f342a9228549bfd603078d07061da145dde4ee6021d76bd6df943e5c8ac0a761803387b46a5c166277d4e5a67698608a76045bf4cb81c821f9133a511efc687362d32435f0f2ea6e5cfb24dd2aa201439c7c5cf30782e1b5e16260679ef77bd7abd4b78f153ec69d2e054c520c5d5031809c781e1799705561ace4679dcf33875c334f8a62a9ea0a379c83dbc0dd8359174403e241f19d70abf4b7381810f63462d779543b8f37cb344bd28906139395e8d568fc3cc2a12c77caca902c0aa0a07fd001042182f0332b3812d65ceaeddcbe3f41cade320cc6f15fc245895c5c4f820372155a785f488e54f8d0453d1094448d61540bb8c629a5295156095b036b61c9d6f631fb65293ec16e46d69eec2b025b82b4daecaaef5999df5c99849f07537e31d2bf1d4b70dda3b7cd651b04a0c334e3f05b02dbf6a6868f26d6e9ce254c583653932a3979e58c023582e976cd8185e491d10a2a57d6504204ea005dd8599f3427fde2b688c9d06f6de7e9425b8976b21e9de305f600807b0c1b09148277741a9ab9b96618d03a1d457af7dd7bcfb1afbe8f0b4716a660c8e3b8b33332ddf043857b594fa841dcecd3ebb3bbc9ca310c541ed583bb94ee37875d405b12bd0453546cf0601569238791ec898521e3ef8de5b4156aef5502fddfba88b723561b5b791e024ce65dd3a888b6abaf6550e579a73a5f88537d6966ad4232f9606412c863239ca8f0a070df60b32739dde141fe1f14ac9fd402e0d1702fb1796712c54803fa4f979b12afd0b63bd8bef38813c0c651e0503f058d2a8120249337ba7b09e89e736d3b2ef11b87c117def03a1ce67709a756426e5afe91fec17929e4e8905a058a6131ee3ae14e58f63a81d6f18d54cf9aaf0028eda6c065ebbfe0eeea5b2b8d88da14046847a58c9f4825f82c9af4893184726824c867e0aba615b6344529319e11c3396a42d97705dd31555af7e05b40637c033025ae1d82230f3edda382c482c9adb4a91e42d7196dd463b48fbeecc895a6fe6429bd12002716aa10424645adebe512c6314cecef677cfe6c62d8c2d106d7ed5ce92dcd55e80a3c766f32d1b9b288a63cfdf868cb632914a464646252839312772694e9220bcd748f033830f82a25bc2f3b0e9d86fd78bf37b7beab07f55d0b425b0a9b9842b08de52828a3cbfca156f44c14e9fb6a45be40145b1d76c48ec1bef40d022d7875e76ea550376b8ab110f1d4c460a005ab40b49484cbc985aeb9e6418bbb67bf4b4c39e882ba91084de9662c
2ded559e4ecd5126bacd38afe57c017f70ae984432ad57481ef9940e79950051c9bcf9691703fb67a984538563a820783eb0b51a95ebedca9893132a8268935ace2cc95380f02b8c155874007c2182b4a48b18cd622b998bb371e181f37d16b4ee5437ecd419c865900b549a14b9b3ae03399b324458ddd96a1e9d163c8532e19f64e778ac71d8c8ac6072396c5a5d9337704fd15ffb8407cb22b914033fe85382d04f9899b2d13091e03cc4cab0bf1e84f6767c0e29ecaea8f5e9d0cf07be7fc715eb8b1e44754edaa0ccc51c5cebbd29d1349658869b160eaa225f56af4f30f0e\"" - +pub const HEX_65536_CHARS: &str = "\"0x65536b37b273ac2f126b11185ef91c1cff07c9b5081fae332ef45c4e90dc0bad4868ee6713acc2eac2d7071fa5885f8b2a9988a07e01bc7de1ad0cb0f06a3467905a2e8723987bbc96c86ac4f1503479e1089d8d90cf4d836c4d12816a1a39dd8c379add349bc3f2f543a81051140483f61e81c3f18ff1c4049aee78e1c245846ec12f1b8392028c6a2e9e3fb110d20f0877110b2f266609a3d92f0cd8b59b7385cbd2e70417ee062ee356ca191f8c68aa68243f5ab62c5f4b237033c32ff7813405c8a4bea82f73380818dbd169cd1060f4f6791b96a402bc4dc83f6a6fde353e3a5de2e626706ae897b7da6b1e3522ed2f4b0f340378c70bfcaf10a7fc805d696822a76f4d8d2206a9a8aacc132dbe770ac870b9a406bde566ac665c942978e5edc1efada06bc13781315eea26f9977f58327a188a9051c6eaa00ca4e2e8e04f26f8dc2b589c399a6160c1345816f35e33b4a0db6d33322c3ccbf82cfc5c1b28be7da5d834d1720343c1bfd8b8964552bae373ed2a6b1b6f02766f679a2c9f1007610014cd71810e8117255b0a07e5e5e87711ac4713e13c6adc899350c4b35d9a22925bd46cf2cfe04fab2fee013e3560380de3b879b01a2e03347c784eba5b1f0367aa5a51cce5dd0cce8f983a1c0876887d679ca207faac11fff8e8a64c81ea02f0aa7ef2cbba80f75eb37a3975963b757f7fcc805adaf7611d729f1a47df9e85df5b2a2a8eed2b69d657a4513472ca6b6a44a1b695c8183dd178dcd621b2066ecccdc1fb88fea080735bb2edb104b88547ad6de9cea81a2afd04f245d1d15c92955648c248501a9b837ab549030860417f7ddff056eb73e90b1263923fd6e5ebba43ccd9f839817967df5c8a6b2b7fcdc62e4a55978c0baedb046059e4754aa043fc975d57c09ce6dcf56b1cdd24a85272faa1d821853f71b0bfbff2967f4b2dae326e4d990c3417f6c990559d25ff5b3d0b2573f49212bcf33d06fabd8a971f90060b396e178c6b52f55667958b1307460827b78884022187a85ea89cd2981f88365b5e2e66e0fb55926d7ee
d93edb19afdef4217f64a33d1c9318cc05cfe774e0f09428ec4a9dd38ac42a48751cefea3c9a8935ab04fdf7b24b834261def53839cb68a89cc61ebaab7047cca9d5727a86d79d5944737b0642e8770974cf2128c663d0133d9555cb4aa141e39dfc10c70e2fdfae954a7dfb39af8b7419909255d2d81fc069071fc29d1763c7579e729d87fd148a9bf26262a9332b2994a024adbb8f43ca76389fcead15a14ae24f005cfec16d4da950c8c1647b8e2357ab59be554f1faad237e492d6a5de11618817da26a611f122fe6c5f1ef9a826d3905ed4c8841f9f5e3431b994d1f4b62d10877c973aa1daf9614bbd55cdd865e36e25b9241c08e3b6627846113d6fdf4bdef35eacd48ca2d76959baa1484adb988262b38013926c388102bf0551c35616970f9d5fb1fded91a25e12f1f12afcce8295aea0ae7608163798af65025a84ef6e1f3d60a5d354d55e0fe408023894f68c2e523be63725b5cf33ace0828cf89a69ee428d073bca15014e7faa83014f1bbf6797ecb80bf75ae012fbd35b48f3d7e213d85c3ef991933021baaa7984e7c92073fcb720911ee736479e79eb55046d3ce4433b669c34e38c14b232402222788c3df8aeb42ee8b487c3cfe6eaece11d9b3aa681b143f9f360c7dc0f157d613c2b85ec407f6d85b1b1876c298c0ee50ecb9e0351738e6eac1e0e5749ab6a5f7aeb7fcb150af97b0c6b4b6df19dd0d06dd44764dfc10361e6217c8808b9b34b62311c5a93b4e6e0f7ef3ae52d4ca2689ffe94a04a4405442059bce9df2d919299f1bf75e74d5d51b963b19a0133e6904cc3c0310fb50a7c0c04c0e9a7d259c12c12cbf84dc72e3e460d74db9a2a3890d551b9451c84ec4a5e565c17692124a0ad25b47bad7cbf8c07ff8df7b40b3226309b783f939baeb02767c829bfe6bbf285b66b235b301372d8896521774d45d5c28ca6593b6f81f9aa9d1c74014fad0877f8134751210ecd6348555bb033d30a2d07a438b111c0ff0ebed28e22b81ddb4b9fc36cc4048afb3c876f62bba6f26b882cb2410d5f8e2895c35b90bd12010f424c64e53d33af47fe5657a2cd25e80b08e8a26274fdec89a6388dfe80bb6c48522115b6f0c78245e18b42a7f175ebf6816c4c45593a9f40dcbd881a1c9f8a9a010a886a319b6fe97e01347f6ebcb64d41687a08c869b0043c6cf684d11e75161c15f1037351c3b51f135c02935815fe9f5e382f4623a3e34ee42946e774c0717d6299a4c3720f26adcac65105771621ff851b47a1f2b96aa6a0d664021e398de908b83d64719da4dc9c6d20304d00c9fe89433f6a787abd9c282212a1c087781c40f5a615fa8a31affac834cd19e98e7b26c115e6f48f397f74e12d1b214ae427f13fad70b62e051d3fe4f90fafa426bd12321e17fa97bb
0fd211401db77ebd2ad2bc5586f9a84ca02af969b1bc4c7229db28e02bd5ca60aa60d3ce159dac61bbfdc6dbe3b58cad672bb64a2cf8e30093d2be04b0f84be835aec191fdfa517d671b2bfadd7a76d45b0dc99ac383a53e324d734bee7738f093a6714437cb70a4d93af9dfe71b930158216a5e19e237276f9ba536b3cf0e063fe7101edffa5827b38b5cb74abbbbecc823ad1df97f8b638eba5a75ae875dbcb1077530c06e56ebd5df1956fb93f3a605b8c1b69fe0cdc42fc6ecdc0794057b1c6e756ad5f6f105727b21074ab582e430ce12fbc1b234b4fe9b5ded642378fea32006d6ca4c539e07163c7fec74848db5d164d4f4f2619aad89f246bba33e7cee1bb434c3c4baa73448226e4711e0a13b433dbea7b474248fea8fb3290c6ae92e42dc4a365e6562381bb1d7e903336c8cfef2863d383fbc0b45f015b0b831636724d6390efe15617333cdce3efb5d0c6c99a9b77ac6f6bce8008e85ad4c5cad08aa9232c312f5df1dd55bea468587a58b0a5d966e37f85acfe260b26016e2c2ff7a5f0fe8c296e3c9f21a69067fbb828dda563ae945ff5bcdcff950b53b1890c37add5ef49c3d77c51f046e7d39f1ce013ff1f95b1ea5c66b37e7a244605ac3e8a5d33c521d7286f6bc22b7fe4076d080d8e66f8921772887233e6c883c5705862d2e4894efe63f475460a10f66a19fe50ff82a2b3c03084515bbcd25aaaf506c9fd90c4eaa150a24fabd3331220d83a48961cf9c4e9da384959935f79d84a2218348d70d394b522ec0787e934aaa472633c75c98909d91576a322198980c06f0ea370573ff2c2e2daa3f71f0f8d92eaf202fdda79d56bcfbd5dcfa435f8300c25a179338bc0d85f9ac297d283aa763892fcfb96167ad6d803eedca540bf05d9980531c135e553bf5c27442e228c761863ff43950e66526a1805e0671afa5ff0eb021fdf175fff6a92841d639b7c88bf7195a8f32b43871a648048ec0fda674cf79239865eda8dfed6d74968400a53a647b66e5b00276d088d2dc91b57f20fafc5103ced1a56ec7f6bc8e56f8bbe42bbd034ed6aeeba802e3f8ae05426758bee5b0e583e34e4df9632b636b79b467c84618ce7aa026dcd7d1ecfc54a87e6bd6aa5c3d5159693fed90a6b59ed65e0bb6d4c4a2ba045fb539652e7df48386bc2ed72e5c29f8d7e8e45233992fd18c473a39cc04fc8cffe0d70cbc0f75fdc7daa1d9f4ed0434732051968cc6123a5ac63e49a8984aface0787dfd84dd5f16573034c95a6b10b980bca6868c903a8a6d4119acfd7c844c1bc5b6c86977e2de33440e4b31f472d6b7b58bf4394a079a529587d25f2ad0fa1bf22e196d859bb45b81788b5789384dad71b23d2a226d75e0679e1cfe9efb1c4d66463a42796877cfc9e5e2435b072d3f445
3e23fdc6555d2cba5ebdcf4ad536b85d30286f3df49cb5972abad4019159adc68abfc34c04e46a5bdc182e622a08694db464760c5a4505021abe6b0dfe15c8eb5fa686fe0cf8a337dd14274884cafaffa3127ca9311de8cc04d19b0ac632ea8ba34c4308436cca4e25efe3796279f3e9da9f697448a9d4a0bcf4673f3e3e6a3d630d9ddae3b6a1ff95a545db4a5fad2f17bcc6c21be224b3a038e346d6e6db12ff12ccd6d573bba31807b31ae12056ad5b77cfc5047df67a48392389665ba2c1e28f9889b8448f0fb14c30ff2050ebca2546af586aeb5d83a785eaa54874521c5ac3daa5e9b365d620801e6384b6d72122dc72ede0ca03001af82d5650011abb62769a1a55ee09aed875fd706f14946248fb4b82d12548107db353b6f39a9da939acce756e7fb9c1866c2c00de1a79a82ee91b46122bb8f30d584619cb21a247cbffc8b7b9b09d76eb13bfa48c8f28ea335c9ba9230a030ae4a69fd7ec47a5eef8c5f9851bb1142092f173239dde5c059a728590f1741dc9f8aad1590501ba5616b7bc188d257009190356f968bbceacf77ff23f014f8310c9f0a89279d02c8d225754ce535000f79ec86455757319f65c4202df8831dad4c8e2b94a67c8949e46b733991f783d9368282bcc00475da4a1be720974d972544c810c5d789cba5e7b0935ed4f849e47207cf9d54342247a7702b8176a164bd7816fdb2bfd52f4c0d6e528b4080150d0eca5707dc5c1c220cb5c0508edc220d14e84b5386ba229b422558d49c9ed3dbe128058ca59aebdee15c9f040f10ef8d77345dcd95e2c500fa995330b6c0b2769c086d03049ef3efb98999265395a2c0ae3cec18c13d270bd91c1feb53588812942c9ad73f6d89eae4e84160abea950ad7da47e2c82dc9fb06d194642c2abf3af0b30e5d04ce6e028d0f522d4639cae0bfbcb2649d2e64a6a55f7437ffbbc65f8fd5992e59a98e98f0a1a83929ef10aceaf6319332d7ab660dd00e2c4b898fd25600161ccc24aee43a10eeaa62ccf5b95790deab87b88b49ec872f48c8713cd4597f6e55a556ab242ed843f866b64ac5b100916f0e4711a02bbfa9c42e47e5b4e72e84c28e27fbc7858d1d8791d600f29936680f470fc4badefbaf72f2ece3be6b85835b1bfddd8aa5c5b7a87f6e9c64ee9e76e836ee16efeaba2491865c24e9e0c5de706a567a54b77400bfaec3b2c9d6f43fedd6a13f713fea1474a6c3504ed17bd5a5e3ad57d807eec47d6e6ed65d8fcb4b9c0e7588680128b865677c38d7a2b8918cb76462bdace3efa22217c64b05cbcc8d29039f438e2687e521078677e1a44678a441e31752d86f6da8a46f91766a40a03526cdafda3e69e636093d8b27eb3d12f235525e21f98483db3f9735d76f2d5bd5f1b4a945a367ee471def08f
2f5a5af864772436a0d2f8da323824489dbdd65da331c9ecf8eededb058c563ba0b1d378109df5186fc7e100c0beb4651e4ef8de0f33cd0e49ddfd7efb84be4ebd71f778f26ad273af8b47aac27f103620eab42694fde562a68312b551afc2c592c7f7efaec178625c51eac8f8028687c953bb26151305c6ba22d44d7c6c9a48bd1542e0211ef0633ffc8bb0ac98b3aebef98fe710b4af01303b1feeaadbbd3d631f3e133c646a0853887ad851130dd2924e84da97191494b93fd711639c4aa54eb60341b67cc5f39fe6b8844dee0c70d38f729d4aa44553a161e9af54c667b9ed083a65451286dff52d9503e3f679b5fa38ce7adf78ef8558176a0fefffd45e8a402105d075b9d50e6aea8fc00dbd5db0d3e0259e0207b07ccddb1cb54c0c7d07699a9bb817246c86bc7e76b4726d2648b266508dfcce13b7390eba8b3b5629ee150b540fc032ac6fcf6a7e20d8a5dea3dbba99b8470ee035a991f2e2a7cabba89431308d6e64a7cf4e931572e937069659f88d36afa4c6e98cbbf12fa624b17cfa15118151bca7912380d4d66220f7b58fb3bb71f3af7861f553066a356bb4333d18e9795fb5a209155f38c7d0141364ec4f0de11a38d5efec4f592f6fca3388f5772527d9e216aa09d8f3be8591d0e4efd9fd19dba829032b524eeb123f27ac8695600658b034b0c5eb4a1166e952667e8441004f4e8de0d0cc9c4cb79fb5579b9d6f2ccb390baea7c9719a55beb0cb87b82314a0c0c8a0e66aae85fb39224ff7cbcb4e9f252de601c7e452713f736039b7d4e90d6495806ea3f2b129ceec6ae7ac401da17a839d8eb394c0c89e7b39b59ed2f5994659ce9baeaf3c1adbfa943afdd952223457ebcef5a3f9f6d19858fca3e61a78a755ebfaf2e57cdb369a8742161ecbc2fc4c5ec29b9537992e4221e14a7b02e9e63b3e5a6da6587a7ad2a4e6963190421a11879350a39c22c9cc12b9ecf7a3f9132b53e07cfdeda0db6e29ce5a82b347db645377183a02cb852feaffb44a57da36c02945bb803d057a094df43dc1b4ec751f4dc3be45bb3b5490822d790d4d67cb82e23e418327be8732cd2db2f2479651a9130620ed92b6166bb729b0e5a9bceee3d1c7a98e0dee9ce57911b1d1a1f3da7d1370414c19b1454c8da246835158fae4de3f882240345aec8174fd5268f5b73b60c602442e7fcee71531b0120368abe2ad3bb5ac2da6f7ed1a14c168ccffd580261d367327ad4834c524ec554ebc0c248be39d8c6503e4ed2e8df7b36d59ed1b6b9f5d8ac455696102a9f795ca0aa75a7e9c86aa2f3f5bbe0dea2b09b9f0afc708ca38152decba96419ddc4890f74faa7358824328d036453ac16ae420fd5f86c0ed8e3ca44b095f6d4429b534d7f877ca07657704d0c35a5a1fffaf12e12a51
bf210dc557353fe27406c36a185d3da3f8013438345e1c28cfa98038883bcfb957e0dfb57ff33f8caf5d5b9edc596d26629559ab6f035ba941fa665337ea16422549c918221fe0b5602180c3ba3fc7c18c7e0afd471c0dca54bb3732ac40b90f9dcd5aaf665b0ca63292ddd7235b67892df8e965fca21a8e77c1ec7d444b6d2ce05bd2253b9cdaadef964813a03b99695bfb5a8aec508e98adc14d35f7daef7d9f384776565152c354deb30b98d0c0543ef080fe48495899862fc553a2d48e1b39fd2c699c7891100c01a89aef6ce38871e04330c8164d6ffa27efc4a4c7f282b5341a8f6f92c87eefcf0facfc9a6417f93df635b600c8c218e185522009e0cc730d30dfbf3f3f2c6b2b582a6298ee855d0d7df41b0005033788c81830fc09cb97e4115ef9713ad8b6b74787db30427237a48fe83126cc2b8d3d431a3deafd22d532e9bb9ec2e74dc0f4901c060cb215b57d07e0c7cacb6d3baede035322fe9c32f212b28f8318a87f73f603c293d516b475cd96eceb1739bb4081a2d2d82fa6a6d0f33231ef184316041e357efc6bd400b7228943b93d2eef303c5ef7d7295ca959f10ead921d6e257dad7160743bdc3db3c8fd87d26b2aca6ed0bd945f41caa574d9ed61f65a45f8ad7ec50f6d7160d05ebd4516d0c3da1eb4efc88f88c5ea244ed90c07239f3d217983d93295926d6d0c8597c1d6985dd9a0df8cae5b0af9718820bb0ab425d4f8b6b5d1a8831b69c695258d2e268deb2e8743a1cba722078458fb7d5b5f4fc0f6f07207a25ed4608b7f8b8ab7ace1eaee32460e5ae00d299f78389bde8f7a64b58116387e5f0e98522c6743e808d63d7d4d3596a74d2812780ec35a9715c8fc440826d37ab29a909cc0c244099453c4a08fe541099cbffb04ead771a5c7c7004c55c44990856f2e02e790e9e4a3db5cf4db2873ad82a97159c86aa8347f8dd8ae846a4cc42f1448046cfa0e121fb9a7787136502f5cc5a21b4ff3da68a5ccdcc54e49f825dabc00a10d437d39a2a9c2e5deb72ab8b188520671eaf1b0091ab7a271a1b6ad840d089f2f79d1ac7ef5e317775eee5b2153c550ccd0038050eb7b7a6789aa6bfe68eebaac4851d93a22af4a08a14e37ec02fb0f89edbf0c1d15905a6dbf691f0f0f9e96539aeccad6267f52574aa4df31ec79dc9fd6f27b5a26fbb1945528180cad006e68745288c3868d611257541af28f036cff9b911172e38272cd5fa693ab89a11e02d795a0c7b4bda53203027994d2dda4129b474a2d94f708cf27840443597d9cc6149ff11d6c747bcc070b4adb4ce4bce847ccb7d7f899d1c4cfcb6db49b87c029b468c71e2a32771364c16c2ec2e6d10d1dedfb3379310e32b3e16153609723855caad0cc183c99c9143953e16a22b832acc833a8cd3b
64d448b76134e6cf42b4b68e920c8682387c64bbb4700d8b131c45fe76d6217f96dec776fe6b49d4a770513d8228fb9e8f51f3b8d086e1cce2c5a3af3f4acfd76d985147d7067a3637984f15ce82560c5e1304f25bb1720ebce8407cde58560943896b8a3ecc59cebeb77131b1d33ddc1000dec496650992b25e3b7b13b5ada9ffb8ce961cbe53f43496e95f9f49d860cae0eed145b2c5ed2163a6a06acc60affd3536cbc328eef9dd2c3d778ed42deab72462cd808f2f1cb4ee11376ab3e4e2b1d3b9e4cd061aaeec1c616573ca29d6561f8e675af33805c2d57d2e539fd646851f4fdd9cd00a863bebb6ddca04cae87b5078c7349d6467f8c96fd93f881e76b89d506b5be38cf916094dbd6ba046995de1e38e5874e9582a7920afb47a220e7925f83adb6c14b70ecb6c85e0f14ffc1e99b11e8e8d45a0c4cb7459a758f55a5359776277548a3cb8e806bc1df0238664b35fa2173827781cde2fa9ce98853a86e3bd0bc8f38a53eba0fafa306b97e5657eaa4e5d2325ff5f39a5308126e0a4bc23696bf2d6233d3fb7a2c83f3096f82613ea80c5e4c3a5d5983d0b637c62bba0e18002e8bbec09608403107d1dca13fd22cf5f936829de8d614d8cb04bf969d3a3b7d2a7bd9a572b2ce3e05f62220548e0d8e93cba0c187464ce909cb440f4051a3d58f16ab95185579f37d1e2f890cd730fcdb99f7bad003c4c71bcfdd430eb9dfcba3140c30d9663e6b94df246aa73c5acbff26b7b6c373a0cdf64ac1229f17815be0134509bb63e066f9259d3e7bbc524e95a16907c335d78f830c4b63e461efc80a0d1dd31b63ee14124c23373c4f41f61b1923f4526cfabc5f3598db634ebf07cb70061ea71417f8efa32a2ccbd67fe4943633ef76184e38f3ef594f5ee8e1c148b30637358e32b5031b80db19e982291b383ccf5658cf2398594c5b570a2a284f45b44f343b33c76dbfed74c8919bb7fb09b46c854e0acc2fdcfcd6bb2434ae40f91413fca079183e087ec16be83cf854fa55d3b63fff2ac24877bb1b3205b77c407bb70c04d55023e99eb036fa7dfab98735234fd73a2b4b0962f31fb89e207f5fc8b9d4dbcfa00a184331970379dcf55e0b184fa79f32a9cd7a3b39b2bb9f3a44d628d85abb7453ce94e22ccb14a379e2f0c5caa0deb484e8f8c455e97a1e16f84685300b88211aee0d6ac18965571fab8166e5bb1548d6b556a4808b86494102a8f547f58cc47140d0b8f39718e1a5f35842b2ef476e50eb08ea1202d6db96fdf41c080e6b4609d9acf3114d4532076db372b4ec2c52f2b96a797896b2c1745b13012c3ce9e3e02eceaeb11a714539419b3ece70ffc777b3f633e406d56fd8f3d1feae707a1c9a64d527ece1d57fe78593df50b33227f35a3e31d8ef6777617569113
460d7271aece80c344be8f5c139f567568a657d8b5bd5910c3e2f34e4fe5930d5dbf5eabe23ccd4c841a3cddfd39a287796844c95bbf6675567b1cc84feb5d3d4e514206dd4d4c2f90e296a04a8a30744618228efc696c40f7857d97e2ca25b868f236d04bcd20020b654a69180f67d43b5c34b626653abf25cbd364dd465dcb7d9dc1bb61bc3bff4afb9ff31f12ec68fa3e08a3fc91741edf8ec23f0a425482034b47022b74cd8ee32bdc1a4fe0a7e9e626315e1e6adc2686c6394a5520d7e918f5fff74e77a69295aaa00020b8337983e8af0cb58aebe4b486642cd5021c5998b5019cc64d93467c0daad184a7eaad2419755ef37c39de6c0d5905b8629ce6f31a1a447578248787af03d74350f25fa8f859750a98f94317d5915e4ad1b92ff45c440e809a1a612d962e2da2778677040884342e1a655559b1726be4ad5a6afe98ffedec7e4ffe0415b6578e3d4c206f9f9da50fad9d78eb94af03ecda8cc521db0cb49f9e334001e8d6dc7434d5cf0866a27e3c169004c2c722d0dc8843e29091d4c529c59c8d434eef4e967a52d38e86de94f27e61f9162ebee11e95f1cff57f31f943ad04c8e14870bee05454650be86aa4284abfc5fae87fface3d519be8a28571282ffe9869e16bf7eaa3fb491ce1f8e9a1a861a76cebe54283ae43f59d654319cf563ffb04663131c39adf1ca127c89d175880423f686823a9dc8f853912ae6acbf1d7ea0bd0af675760c7435eb4c99961cfe4d27b1d61752fe2d33ef19800c9966752b255693e692315f140a80f830be5d9f824d9136115284775106ed6a0c62bc59ddea4c1662fded15c5eda869f65f0c35f6d139ba8a8048379ea6a3c083e78f68f616d23b913100d71d5348d0b80d516288c9471100af56ce2b5eb5b310d7d8814fb0b7518377b8144ff5a84b5432dcbf931563aaca31d0239c321823fb3c28dd41a89354ce47d4ba95cd30d63dc673ebd03973eb03a7d85ea88d61a6f983e7fb4869591d26e2e818a1f77eef3bf277fa0cf386978ca5195001441a1c0579d78a49b1d5b007b7e2a4c80cc4a9721d6084dc91f6d21f6a10e775c07ca4ecf9a911015cbb63335484f7ef654818804d7474a4c8be35a899bc8a59847590606a07c7eb743244fe10948ff842c8cafa524aac50466b9a5f40461e7fd71950eb673306e477ec34aa9e0259885b128bb7d9c702b410d7d524dad6da5744054c8ca10fe8575375ebe645689dae51cb5142d3511e21a7b43840d0b42333bf558fd51a2870c4860982be9f53355d8cf99ee8bc6836d4692195b0e21fab6280a87e6243097ecee16e9d578012f67f7a377464f4a5f2f222163388a817d0f2460000734e882ce7ebd4276b7e1a6a22fbcef87d65a9ace315ddfbd8e3a02984cc509312fbaf7f4e4
7a074569ace19f14010aea632670957b2e9d5ec789bef90e32914d3d43c67a4571da14108c2ea8b80af4d332c831ff000d02a98e4ace3dd037ae843a8647163a4e80cdef62e2536c822bc144e643e0f70dc0cbf0a0b54ccf5d188733eb05e87f92add63cf7ea85f91c4fde4e2dac2cbdf2f656cd01db7777ca4190e32ed439d66c18502decd0c8cc1533b263d0d747986c4e6a4aff70e2a84badf0295e5df47c1e35ca13d6bfbcfe7c794cee421e96b219af6b694ad0f24d6f76afc88c387903aca32ab3573eb56dc593f33ee9a910dcc3ed709f9dd830a2029376172e63c7138b757ab345f1de0accb21c2fb6a06b656ac2399c7f1f0d5fb3447c89128448c35e61c2032b4ea4a840119826f69ea73409ababcfdb04356f66429b29499ca72a1037f7a08fb2e6f265b4e743e68035ae0311b2ccb61aed3fe17dd22c77ab38c49b09184107fe012d43ab385a11171ed09929cf462649449b68a2a10587d2f6948df8dc509e05a2bbb1f16563a0b6ca9c8b40528f55d1ba56d68b3d39456c2ea71acbd06db6cfea521f51314aeb39444080a9b05aa99bff9fd5f2feeed3d3b9ed33936c2b1bb0c80cfe0a2bf6fecebfd4f2ba88d6bec458dd065b852d56bf61cdf98651790cf3063d310453ef7efd57c63ae348ae3686433387dff0595622d44ea5917e645d312a1175571338f4ef78c7292ca625d81de1166cddd0005f0332b4c8741c281a8bf6bfa08520622f74e19a7a43a26a8c3cb806009d362d5119c0182b427f52652ecf4c346195aceab37c5aed0e9fcbda7d4d3a7dd59dc3b10c7c5fcfc98ffda7b357327812d2e08492efb7c8dc6fd6f7b3c22193dde3ca229c1a902e9fac48c06bd4ae72560b3420dc772b0ad7fdae4d48f2b286eb5f85a1ec12c77a21eeade57dfff2326e46fe7de05f5b0004fdddba3adf099a3bf1a6ff5cf1fd56cacb8bf08c531237d5d769a1d9cf676dce2ca1309745aece9b341dbdb960b84c40f3f45d72408f2bdf5fb32403c5826da8af71d6319a0e6e9f282b578536cba20c2ed267df4d4ca2f49628df45b836b993232981e0bc4f121b80295847ce60cb0db41f67851d6ac64f683f7cf6f986c5fb22095183645fa87717325496930017920dbd74b0be6899036293809aa863466c271158211f3e26dd86a62bf4c159cb17bb5e303f50650b8776c5644988a896fd7fa1c43fb6f142f1451c6b20250612c0af43ed0c2570fea925070ba9699a6b7ed67e7b83bc1db2e213a5a63c96a2ab691cb38e90a435a8c6f638baa9af40c2913fe0c1d516569acc2c27a57e13116a7ab0cd86152c9d972d332eac62f8c4ef9cff1de4c3e13cf03c3b9dc38c0e71069450443b5e07b51e7cce9b96dfe22b652f29541f0ebde8911fb56c2e6baf361ede39994d3679af
b266734128094734dd29a0c2f90df70f30bcf45d0fcf461d1d22548d7fe807a571e690cf3bb71cceccdcd358bb66e42da73b8213770cbda83a50d1e4869545254cb4819f63da570b4b13c8545a095ee5d57bd74a84f70ab847d8d9478f232928a7335d8c36f5985514b81099ab32dbd5536f16c6074e2dc92b44e35aff65c9321e89f0f8b04864d595467c73c8dc618d747d01b3a62ebca9e7a4dd86ffd218e1cdf85ecc2ce300fd5787ee569367c97565a375c414ef65431ccff63866be5e47db305810c7df61727bd1c6b5dc37048656320eb71e3ba587d82b78a34ae20063e4f6598bf60b1d85c27d0fabcfaf9cc414013891e6f02d5953c42ca1c8b66dee0be7ea6a5e882a1d6d35ec67b6e6c0fdecdd7d5a418212ed30ee1cc9116fad5297cf60b1405b8943872197347a890766bd75b258c0cdb6c836dc62ed8581077f389e4f236093a2e530db8baac66a22ce78c5bc67ad6a928a5e666f59135ba35fdd6d8f96176eb7ea8d5d7e73cee10471ad348f89f8e17d1f7770b166253b986d9238e4014d6a1b2d7edb9c13c0779d4e0a11d09df3d2fc3facf27cf867189dc9c261bcecc301a51ac355dd08815807a171fa5d8a045b2fd37137b2f43dc630a4f426b53e01a59b88e3020160ca2c27c799d63b4591a8607cffd25ded698064761babb99103f2d8143192c5d9630cc62beb875e793778092f3046cd9f1b4bc83307a53cbbc36f9f1118053cbd5c06dca39edfd2512e7bfa16397cccd20fe4fb38c6f6ad66513d42be20a40e54cc0deb93feab18795e45bb5878663e0750ed3a4f2d6da17cfd85dda8943464efda1fff03205c84e9cb2a74bb26c938e6bbeeeb20ed2609cf795d6eab980a28c723a14eb809c553d9e5c5594aff1e96074649c1b8f4ea42f0d2dba40d0f07d251621297038119ae5a52c80cf001a332ce5f647d6c2d020ff621dbd5effa4a4be4b4cbd540a0313bec9acee45307429b582a5f71f71a6888bc4429f85beab1f6c11f3ddcf1561ad641e2571d17c77ff28ea33772d89aa6e8cd12c530168ac103a98421c299733ff4842ac944574ce3c9df87506daeac089eb8df22f45ceb8c1230be580bfab7d2a746071f28aba102e93c2e46e30760430ebb3e9883f2d09511f99331cadde5f96d18ec33f3fe2af12d46a3e4819ef95634241dbb23f0ada97759544579e0497f0f4e3077f1dd54a14ebae8b9d825a729e4b853f52b8d58832b6a20e56f27b85fcf38d39563f4afedfa6a5e7c58c47c417438e080463a8ea8db995297613db3be953e31fb570f54f9d90fe16c60fe2d75957701ba1940e98658616d5545d4deb230aab310daba86965131f7a6dc0f7d6c6e6a12cf2481f3095b822e52b54e2cde5e017081358ad1085acd94e434025b710ccf25f9f442
3e6e4fc682ffe1479840d37466ff4771ce035855cda8a3e2907323fe7760dc4c39e317cea433c7a32fb587dd860683b08a9b8a568e95498b2107699f5e2192144fe52ba305240a70b8e9d60b56421d7e7edd141734706ff670abb57e1683b1c12b387df907f1982db60a0f206c5892b3b50d1daaf77194caecda12ec5fb1f89fafa5f749bbdacca43de4f7b6d418a9af0ad856e662a2ebb3fa6dc83a41609b1f7d5e90b53d9e8a77a73cf80f2984e3af010aa2bee35794712fd3502c9c58a81f7aaafab04f3265776d6bc82e79d0ec7e7da13fc57c79dc9e4f6ea29c2149fd57c6b878bbc3adb2aad4526dea40e8e30c76fd836d2bb825dbae2fbbe6ce6fb25c71a5cdadc37c0e1852a1b66d7451f90ea84b352c40a78cdddf18b637bfccfce077209c739ce802f85392568f0066f632e2cefc0f468aa965bce09c33713c25c4e0d5381157e7cd13f764cfaafffe0b47ec94b87b9146290719bb5156c44b97623754f59583d8b7f7450ec1b1d7efe8b8f5a09decbdaf790922805d7b06a29aab14cfbde149a79ab9890cb337087f0ab3f40b8abc1f2a5d3bf1aec8a7009a91c7b21548000d50ae6d2955bb9262787eda1fe5e0ec769a0c9a3bfec552fa05d6af6a10223fe4bcee0832475e5a3a38c823cbea745b87cc39d7d49defd664749e920843b801cfc14f558baad762f8796da716674e7ba1a4662b3392afeb24ec6e3a7e85f829d64bbeb812b021df0f8bc00b41660d106920b5ed28f3fffd8b59aff15f6cf514394d457d7ce7fb965b515004c8dcab97e3138b9daea3919f09af0ba2eb15fd37a75b29589f9e7456cab542d91268b8395fa90b8a2f5832f1f55cc4e0fe7c0051376cc1cd09b18b6a4674dff495bbef5869d148a967835c4adc996924e744e19a6e80456f06e30d0fe86a081f481e2aee593cbb63082d1110eae5267a5c6e8f38c766591f5de6ac8f58cba9aeec0be824b61a025bbb8efd53d1e59f6ea24a9408bebdef3b3cd5ebade03afc2dab103172f92ca4c9dae8759c44d67e0b442c00e4b81a50abba7d808c0b2f2bb9465318da6cd9bd89e3948d785b718d1d225c8ccac620bc9b31aa838e9272b4427ee3753dbacb7af3cc175063bb9e063a0570ece3c50fcb9468b7e9080feb5c64507ef91a79eaf0d70a0298f9e51d3a92b2591e3e6bf81b4c46aa5a64d620f1a8817d29d8d0deb2b373d094438a829d124aa966d8ce495dd5eb530989bbf7f4a2410928fd2687a5975bf9384c646d2480c92973f0e76cf8c126265371ea0612548cbb21a2772366d43aff01a3a0fafbc609889431eea924b2fbd0b03c1990f7480bf48e55bc897cff83e2748c8628fde910b643b211c951a01b4e1953853a7602f01ffc05b58f8a8a63f9b1a1573a23342154807940ad3080
c77f95cc3fc714cf427845cb2a02a801be971f980f9e20ff0333c3b326c510615c2a266e3e4479f6bb9476b29e9aa9ba2d18e5a47f2c1dc8ca2a2f68d57410f5514c37499f7e8aa634567f1f5f43a1c858ad964d4ab4b3fda048e98c2bb369a3d7c9bbe6d202450bbe4e6874d01ad3ade3070cc6435223911150230529e67998d86740ce1cb35530e4510f08b968b78e319c904e2f92187af2ae70900ea7faff3830180a72ff9a55636e6ea0d6be5ea7c8a9dd2cb4e67afd3c4baf5a9506f75173e5c8c556b77db2eba2b25f98db84d64302ad1952936eeaf8296379e14cbcfe0546ba40750e3d14d29d884d1fcb0538d90fad43f53c20694e19239e24f5adcaa000718d0819be166a2827a04eafcf04fe4a2f8eea90335fce30090601f5dac609543ce74671cc0b735cfb447002a2edb9b182cce199a57a186f6bd2e5ea5216b93e13e88543de802becd8c0e7b0de5d08dff22903024da169dc45c8c44d4cafbdf8ac7e3d77010dcd94d4a95f1be9322c8b99449a9c737d2812f8a6fde48043d51058633a0db37a64ec4936ab4e80fbe7168b3d44bb92e50d964a9d98d254a72657755790ac9ea9fadf5a221d5c1bb6241bbe6e9682d9f74910f8f3a37d5f597ce1c8ffe000ee97434a452be176171dc2017530ba5314a9cfc90f3fdb1dfe8c9df810b3ea1303892a9df04337d1728d0257f85b921c9e1b2d954a090e0e74cd016114f9e7b829a6d311cfac64aeb2a3cdd10a96bd99ac77faf29ed4fbcf93e81fad1a37bc72f474c650eda34c4150dd67bf9d9c028b65748f99dff24595483a5a5aa8f09886b51cc538aa8666669923b1b39ddacbd201c11b6368b1e15f60bd0620efa37608a8b797f7b275cbc8ae396128243944fbf9dbe14e00817ce4e374ae3c04ce65b31636d53531326108d77012400faf0b6fe6f311e923692d9ac42af0792ec8d7b47303d875e5a95fc355a0a68dd690f29ba2aed1193635f6ac0e428f8b6494402ebf4440ff409bf80fdf4049d292178e18473701e97638ad7e6958a951a457a0df68a1f0e9869c77cc535ea6e8b35fb4b94a30dba24bd9d316f5dbfeaf50832a1573f6965959bc18149d6f7971f567f035d491c7a51825d5b4b5cb72dba919720e6a46f81aacb8e352e5fc15844a173609e89a21a462676c6501fc66c2f57b041e8a1c83917f4a0e7cb890e64b754c4f6731544349c1217343a4c8a7495082ce57e5137a1c80d3d479eb26af5f4ab7452ea86ae729abd2b6ea4c7f22c1aba4d71d47cfeec2344aaedfee4acabe07011807029a52aeb083436e4f4cb73320ef0d5196f6f1b2edea8fd93cae03e54997934e38e3a9b036b24d491f803799a0fece0cee98abced988ca5b32e179a3c8628269b18dbe3b2914d9710c4666cf28e11cf1bd29
6428875f25085e98ef1fadd60db3e3dba3cda144824723fbc449103634c5314954b5be07d0f2a3c3c45700c712bdf0142f317c2201e3a3670743f9bfe23f91f5b9e5fbebf6302a8faf6107eaa1820ee82584557da209796ab692d37f2bf7d2cbe192c25b2a2c2ff09648ba21bb3414ccc82a3b49f1fa945f51806b24ee50aa3e562ccdd374778f3ca98beb0459bb56f9359bff810aee72909db26fc5a25715ef07bced17999a010c78226b3f8bbbcade996bfa7f9eb312113b84cb1d65cd0a52746d2c7a89b109783202eb2aa14bc7a1a46fa225b69d1992838338c6f5e04f0858d026bc7968a632d3fb345f39c2dd850931554c7f8950246134b9cd5f4f252a0d22d958a072c61d25d5ec54cb9242259f444c5e7e536f0c32fb716993fa7965fc98e080970a36a08dc4eff9f85b77ebd468cdcd2e72d0a2a021b0a853bcc033ffc48bb68dff288828e46310dec368b860853a6c19069db8634486a6f545744ce83cbb1dd2c5f2d2868aae84f1ad0fda0aafe11f0fc0acddabe4877896d26c90d6542692b80c65395f69132f0d2fbf7cd0ead779a468ae9f617eaad77ba25db0acd78dfa2b2005aa8e8273a364cf2559e955efb36db928aef178a4d4184007d5ca5c7ec611016a355ba79fbefab86a123fb97f53b0eb32cfed5ace50a458f83644cf40b91066118343e605a1f6d3d5ba3efe9e0f2250d5bb5257ea2fe097a7a6a3f80fcc6702956e79474e6bb0ad3564e621fbc320437499ae7c72a2384664a44a01a06106bbf02d9c1953ca2d2d61c95a1d6038b11635bddde4ebbfc5b419e3058ba0b4b6d9b165becb17b64d4cc450b31ab20964d26ee2352085758f5851d41f754d8a8f8a920ea88631b61f69d60146a2167a704129a58fd8fa6165c162820611a8906d1688ace79bf9e66512cfd1484c096ce50388e602ed43cfdf67cc63847f3aa17dc2db6b7002ca597de8170c5c45011b43fcd529de06cc294bcbd8d73220c46770c5a9701628669d049a83e764855c5d32d40e646989d7dd84266a7d280eebaaa8d47e470634732d59e9ab4297eb3f07578b53f3531adc1391cb47dbfa75fdc02cdeffacc061deefbb7f0ec9f0f96e8ce0496e5abf5d87798a699e416175058bd093e97c1773b5616aaf56ff4ca5c26fb191c5ae41d4c2f361fa60b6d554b3480aec44c9fe477d2f0f7a45d7c6629344e67c93f33afe231625e5aebd5f81541f8218052a577b9ad7f6b72ac5723585e6de4ea90a2872b2b951df89ac3939cd87c91200b254d574c7f7cb55af7c9b278b4d6b201b12f0b94ae06cbb91007bef89368b73b0ae8020f955017c5d46c291f0fe20fbdba1739ea4a331803c6fe7bff876f89949cd5da5e2e7eee764b3c7c45e8ae0292753ca8ab2ae631fd8438d349a55beb9bd
be841e6bbe7882bcdb488470b0e55e6eecb0124ad3fd3db5a1376f11969b8287456d3d44f9a32dd474999da8f7ba73dabaeddbd2c810cc63006d8e5dd154a09b9e814997ea719de328d4e4eb6ce1ddf1bddd6e9d7caaf9b386955a8fe142d010e86532ecb06b3685c0a69161f2627a8333068c7ce52140eef125b42b36bf27f74070d192ba8936aa587b9d398446c2261fb965280bd6bc56309184181df312ff673c4335afb8a9e2140a1ed9832d14cb05c8f77ea84f3f39d7516707ff801d070e9f425c138f06929f5fe233ae282b20085c6cfd5458c7d08c24966692af215c5693f9e09861c01d9b0f860dd4b582c44b4a73459741a15e0edb6e3022391b79d5caccf5386b635829f3208a8158ad8b7a396f6890d0f4cbfbc61a2624c9ff55d45ebde3ac5c5f07c9c043d015c249bf7b40262997f5c4e93716f5e381b544e4cbf76729fb813e98333642f7d68cb6caee49668824a79f2d59cf73363b3aab88b4a6640547e9beb87cd78b6e056197ff9871fa1dc4f38d05f21a6d156011761b90a605015d45a834c13ff17854037e72af3a17de04b9deefc24c59768c8a615231b6f3a0ab37d69ad4a1ab4318803c69a3423f70ef6ebae7c80be230a4c64ff3b7ee3c29c6eec97370868294bd185ce576781e8dbbb69138f9569e4c638650301017285f5bcc2f606b39222130456ce7c2608366f51cbbdbe36eaf3448d841d4021c2bccf17c6de26fd1a2a36fe4477d9a966e34cf035b9f1490b7cd0161cfe4a8afc343dc85119e5ced8c7757206d6bfbe673677dd099007e05d031b53fac544405a942f3654502ae43cf1f882d7c562841daa43f96e062437eee30a993c488ec4d501ebe4ae0019344c8d9ba892d2e1ae84c07e24e9ac59852a15be0425881742c7c83caa026da718596830ed3ede80bea26565d24a529fc04ba53f5efbe355b36451ba4d33d7f7b12e1a09081838682308bf2c5a3b7597870268591f34fac0691ca37901fea0afd16dadd5cd3e2c28d2f265a4fc4f027289278184b6806e95fced0d23912b9ac4f76225b96bfff99aa953e779976043dbed61175db8b9373b21343de3b2e8a367e32197cc198a8cdba743210cc7a42971a10966a4c990088df071f1ecc96db33ec361bbe98c825ca3575b0f23a8294ea969cc8dbddab551a450655e488341c0541d8d189ad94b5fd67d052554cad0ffbc8d020a4110209bcde2e4bd6d461af522794470be1d268ff633f1317459eeeac3717e72bf9da0f4823dd4e2981c46655c5ae2dd213e329c5b151e9fa973396db3cf3d79e5362c6e63190c1fa936d95b98d81411aab1da95ecebae94d4c113fd49dc269eaf9033b6ec49c13201c14f7dcec4b29a146056800d4eff9a0c979f6a381de3f69f6aed2c5828cb82ded7db52e
cd793b753205feec5a48f61b714c151aefb9944e254c43045a6c5e9c002c79f2bd42a17b713f23b5de9aa788f87e4266dc70ed569ef6e68ac621acfb6814f7869887fe266258bc82b29a70d391922a2713c31071bf73da0b4843b8e1f5a7f3f3f2aeaece946a819f00e34e0e2d98fc32580e79cc237647de6126a451102b1e8c5cc9bb2a3c01f0afdffa4005c60aa869011d4dd3eaa4a7bab937bfa7f82518150ae35fa9ce24bf829c197b792fc0994c7ebfb42a2f9a93f4c1892e4c25f662dd8248d67aa4ed9afa83ce9326dee5a20cfd879fafe9573b472817e74efc65e8ecf5f47199c9737332f634a05b3354cfe9340d12460f9846404e1b406c911db86c6678fb04a3317a645b1993e5e61bdcc0f822ee280bae9d84db191051a9958f22740cf0894555de3cf136f4cc8eb1c4ac44a8a9822f95a15fd6f80bd37252677a2cd10d703f9294855d169242679002d3b322656b4ab8eed68e404d79614fd4d91816c20a390341703e68e3ee241e41739418373d0fb9ba1fa6a0366b0cc0a2e9dbdd983188f8c0400dfb354b81f71817cd185526793fd68683952ca58763933e76fb7f1020df7f59bb35b63d41d14cd157a24fa3e4248b5cde6286519e8f437a086e9191b1faf82367faa4486978ba6f1810dbe877e5e33977dba474dbe70a3c4d4671b3f63e67e1db5281b1e0d0e0f1a66b1397e0e7726c4c374840a5fbbcbb98b6952fc211261def9275eec50e83729df2399756d6162cc2ca4e317ac25801e89b8f18974e47a6c3fdfc263181a689aab050f6866485b67fc445f037294f95592bd75c454a17df0a95a7e2a30c89a9bb2091102afe444d98002f36c65be3c1f79fb6665ae408e8744ebd1c399d0c95b20b21f06f7ec00f86554ccae8171ffc4698173464551795a77ce540ffe692450ed264e99b64dcd4deb6665c99d906b8b193ff1a1960d1fe4417660de1fe4f7df440b46effcc4ce95944bf2ab5ee1aa227d2cf5f513c191f6d5baa23d5f4f759e9404a0c1c86236e72d1bea4aed258bc652bd97ed8a226ad6e50a61918b3201406099938ba0d528d9bc6ceb36df2f7ba6c6538678a52db407d9272e32b13eadb152dd4b8ea3d384de54f57c33354c9123be7b8bc8a770ac5338c912c1539124449f627d105aba2102a4b0b67b0c65221cf18203e77f49027d0ab585b8bfc6861227b7752342921b1c00ee5a8742c089f201a190b19e41d3740e919a808e1ce8d3591a7beb851d81b8a55d61586651d229c1650f5ae4d2dca791c1b6d3cb2ba7059fcdb19dee84e1ad21ed8f12bf460be2dfd201a9fec9f0ba555af60262073d1910898bd32ff43a6cc844a9eceaf111208c959f48b03fd34393fa60ebcd35c973c9bc61a27bfc3f6e5c4b0ba005d1605542ce249fff70a03
097330181444e4ce33c130517551acba9a693226e84a5edba5da80a3fb530bbc8bb5999739dee9c696a9ee1c7198f4cbe9635e007afe30cb77c1315a02429e226e3727146074c38e043c4770481cf4d7d321421ced4bc44cf359fab27bb5add0ebc1f02d1de646c157aa0a2fa053f76c5c17dc0e21271d3eca7ea2b4db478f23c052e106a734f93c5b3ca78de70c8c549dd411b5944ed9f18854b4b1ea1acf5a8780163a1fcf2104cb4c483fd17e8f759880a84c0e4cf5f8304d6e4389237afa8c93650b27e0319ac90f660782f92afab62e21fff102bb8d316eee157c2096ae69ea4cb3234c3894fbde89aa3e8cb18819f09ab8961d91d33901b0fb8234142d6692f6f38298534088a4c66b78760ac4cef690e6e67ef1c25bc8d010a0d12ce5af512bd2cf723bc65aa592ed8af781767b89cf66584553ea49cc8bca17efbf881a2bb424add154ff538c41bdccd485e625ea495c8ee50f6aca3a1bb285905295dcfe15b564a649af905cef87ab0c018facbcc04002f9903c8ee91de5e455eb4c35c1f0b1ebd65ed2c79fa3aa1a3ad8fdde6949c21bcb0489310021dd0ff77bd3a54e4e3b445547cec1ffd8883c38629b4493d0317093e90d51903c814885d75af75d2afd96792430eea0b65b981305cb684cbe871f4c5b5de566a86d2982b6ba8643116f50e988b228a460de65d9ac360736fb2b5052f51014142b0a9e2b01c36bd85182942ebf39943787b4b19f33effde6a4b78f137e41967cc89d4ee64e44d67ab1dc84dd30e65c51866217d0819ede95aeffafaa14642a6a8ce8ed24a8abdda1f5972db5f0bb9649440d298dc68e37ea3175044e099297e0bd5d62a9baa8be949ac6005a39105ab710042110c78a487ce7c87a2eb18341b7477b540fca6b0fbae9550aef51b759b9993e3485535e15799c473c5d71b1f42f1f460b9bf9467ac4548d5119444d721349a122314eb746e50915a4463efe65931bd505cd5883f0d6e4a2b4142da4c61326649baa5f679e8a4da39ead03b18235e148dc5bd8c6cd35c616bf1ae2f6f781baab1f4cf5f618ab074b774f2246830822a196c16053ff2a97bf9a1a04b83544abaa2c40cc2b3c3c578c00cad70dddd0ee284160987387de74bdc1198d083e814ceae60b4815bcc8677bc2999818615a872ebe40100a6c6bc0224c0195ff0b464e2c7ab6e828b977f5019e3cc6afd85cf39ad89fd1f73186d57d7b111e7ecba67edeec09dda5a22a968c9315df130d07863488ad7d72812f23a3d10a2d4295f6ed0e18a861c01bd621bf7090617e92b18e32cb91cf35741ecb3084b7f7b7bbaa2e358be52f2c197e1d6a8acc294c7c43f059178c1ac2ff2fc5ae567651350d4a87284f7fa8c7c31b9c2e8ce78d70205ffd1a869fc63c228d3d7973ff6f75
7abe1aa7c71f3e8624b8ed2c1dcd0643535eb15e73dea20e3ff98c8ddabdbe2fb6c346c4211849b993f27973dc6143a86228bfb2b5e2b94db481bc8d63c61b0e6115ec4b75799bb3fcc8bf5ffd7e0287a8221eb5388d05ec69fabee5ad152347ff9ddfc352838e139f791970773d32170afce03ba5d306042bc4e3a9c9d447ca0d93672bec897ff1b59f3a91e7f769f0cf7d1afe5af0266c48f486997722470555f59015a6e9b74a32019aab26bebe79b86bdf6511c434889ecf5d8766fd798f9ae4f10acdf4810add185d82345b0c469eed4f967a5ae662859127b22b1f0bad7a32196b6f941d3a2aba1641af14e074d19dae0e024b5defdf98064de910758b275eb0d39349a9aef6b2b9b9e9ca3d5a32d771872fa0c6ae9e43f39bbdfd36b9c017ac31be13f98bf47da6c367b92bfc8532bce28d6d9762fd56919c033378d0827ae8685a4ca04e3482fc1a3a023e103e2924cc4d5b897e23afcbb8f09ba3586ac455f95f4ffdb99e5fc93ce6aec0d673aa6b08de5926a19eb9a7de0ac787881bbe67a10690020006133c407293fb99695e491d26bd254354c22e08e3fdb9fc7b828f446c7538fd631d5ddc9992dde920bec9457ff2021bf0ca34f87beb6235f42e3606a48b183ea3c30dfa04ee94b11969a308ae876011826eac0a64ed655e84e136da48efd9d4682ca0e41dba225b19272b3212b98fd0dd1ee708aaf2c16202252fba04d8d9a960103d431b8e037b6d9b97aad6735fa563600edf95e8cee2bf5d0977538db048c7dc011582cbf4d7f94df5b39928f015538f76acd23db98b72cc785554523aea0af7eba88041df512957cea80ee6df20da80aaf0f76e850b4d5ad7fb780a37a34f695c8dbb96f1c03dc28cf6c07e803412d3c0cfea9e94e300334e8b6d94ecf53e0811238c0417399ba16309ab6fbfd99f7574c552911c05ab03dd301a47daed3fbe186bad39a262d8edbde91902aa96169eb53c5f7506cf14671e5faff0e6f3c33932c9374486e727858fe5c6a8bcfe80ac5672bf1f62cbb50ea9025a48cdeab0abfac0dc1edb00cd8fd3e92d19e042ab6e319561c26b1d5fae34b408a926941e4defc5655b193e1538db3e88e657d0e15f5475e51b039e85c7ef373b7a9fde3d9aa569fb5965986049fce25a73449b953ab590071979a479fbb6b8f44f03c8a9831e09efbf4138dad207e5ae666ac169682300115e04907e090e27deb33992f89cb3b4e460f7a7c577bbdf749f2707ba42a8263496a56922cb7fa2d97a72753a98db9165de7083ddca4af6f640d1fa6430e47ff43c22f87206247343bb0d365a77d459edd065ea228127ce1a1c56fb535e109fb0ef6562d04459b2ee1b7f8ac7666eb3a44ceb809419d0cbe0c4ecfc274d4a3ee511e1060b3b7948c86bd3c9
effb965d5429c099df9b14c9e0db340c7090ec8ee9356c8bc8a6b8b4315b9092051d69b9ec86715edfe305eda73d1188b8426bdaa49329fcbf916eb555e23aea4b57b9c674ecc0f3107ed0b7c41008a61d0b77af5f5dffaa015ddd623e33ea020c0f8f5007d2bbf76fbd94ef312bc27b77d69b730e4eb5e4686e2e4950ce7a2084ca361d93c54e0b0b4ca9eed6710f1a2dde8875b877c226d2d10b828e7e346aa2bac738d1dda914d2a8089eb012a9ce5359ff74561150f7202ccbf5016fcecfa22f1489631524a022332806f7e9daf39fb8b774d4d3490107c2a4d27546d7cb2270b5804d01c3371aa06a3d180f0b4df11d23a6545157c1eeaf1edf0a02f492d45a639aa6e9d50246b966a2eec5053d40bde13b502a2f86f384ebd94d6e4f69cbba9ad9a126e2a0eb6dd84d129e9c5d2cd5763d05420e283170e73be17bfc612941e1b6b035f20d1ccf010faf6188ef76068b908fe5519c772bcefbbabcb7ef717e9b906a62a0b00ec56bbc0fcd72f08f3ae1a257dc847874f420a42d73bea2fac9900baa9dc87b3cfd979fd4b20d754b0ec92b8afd19523a2c46f913febd7489a156ad0c69c26a0472db960cb7f73a020e202294f29ea25eb51de2d4f72a2b9b7f9d1a379492e98c9514e77b5b516f32b974f67ea7a33398467e33e125db8994913ec7721c66a31407fdd147f6275f768bb61f1288e6318610f2a471ad84e88230363edf13c7173da724656392e8c08cc53729080ced477ff76cac77e2985d38272e06caa793d60d14d5df6d051699deb8a51b7749b20e0115e648a87c8b44c8b26d7b8f6447097b0566ce388a30087c1050fb2475d8540813a0c48e902419e37524d0744b92d3f14081fac0a101232565bf45b70cd90474fb38bdd6f48c9a46c523524a921c4eaba009e37655e3529a15a16dec68fa4baf62c51b6c670d39fd0a769f4714311d0e63ec0c5701fef71e22c29614b3ce509ed448bfdde4648cd32b98747e93e2cc668f83aece521ac4306960bd04241c2d5e9bfec9b8160603733f3d015e7fa2527ee300bf6de1c2c9fb0dcc3611941dddf5c8129b27f09801169b0fb1913bb7748e7193f87550479d6b7ead333d8da7e98feb892cf92b2ef450335bd4d2eb203c698b59d862ce9096c030eccc5885835f37fe2aef3184c3b75b2b9b3803b526bb32e0194d812eede8bfabb3abb84c66d8c6450b08e5009647749da4891b4e93e5dfc5de09b546cb2a80b9cf2701d1ca4d15bc5da86caeca4ab339b179d9e497db1450d4681d32da23c5f3b8c24a9f86ac1d0df434821823c47d8483ec86b011ef6a219cfa14b74c2064f029d636d8392494fe106979e179eb7ce234a4dba1551cf447309efa5ad1f70556ae08bc777714d586578effae2cca477e2ce61b227f5e
2d5241e493d0b2379b621219faf8d8be44ea9ef4ebb8b6aad01440d0e9b10c964066e849128334566f5dc39724a8ca22d8d057cf32c7916ad2870c5eda332f87fb65e781b419a10fbe49630d47f1b115545309a5390e505702728ed43936c42d1885df2f9bb06cc834dd68d42175d9289a91879678ae06032bd324500e51f0ee40b14d1cf6d940a5e0850d3b202685963b8035e7ada7bcb80dc524d19188c2160f8539edf53d2fb0567ebd56a1a2f7c43bf7de5532a50b9afb27f43ea631ce59367a3274ab8607452f9811f4400dfde9cb049b2a475c8943f4f83e0600470b4f6ce177e2198c8fe569d2bae0a1d0b139211d741024b26f3a9916ea3c0b704b36c93275a4991dec8d91a541ce27b332183a4a1fc372dd34bd6b876cd1524e6600b7db242a799f20837376307110d7454e187b6c56d62a574e51e9e5f95e81e3ef5fa42c3518bb3c42ce4d49fb3395c42c2d44bd829594566f4a12e1ff8c4ecc5c34aad3f1a8721ce44d3b696863434948aa4770927401e4b213842605e41a3412a73158d51586e5311d5314de755737d4a0cafad4b8eba58b552ba767e4462ab4a3bf2b82d952bcbfb28cd2746b65ed92935abb4fafe2e7603317dadf8fdeeb6b46c97ee02e5727b8bb31ee910f86f45add2098d6a6b0fb2bcbc12e7b8ea5ea851984dacaad187134e5163013745e85e19947b2ef56735d9b461ed5ddda0e538b1239bde55ba16bed3b1dc453e4148d89fcc32288fbebd90b7e1daf6ad6ece865da477c8658075866b5c4994e8d0632391d8c6ebe0efe37f9c439582cb28824c2aad6548342eff83fda4d7d90cca7955c66b2ae0274ef207c2fa6fb7504ef4871a5df25bf001b8adf8dfb19dc19ab03fe960cbe6f44b93418ba8ab173c075405cc6d28000f22f6c7cfe4063e7aca85914e30ef32ceb78709e5134030fd91b142c2ad6f00ff43bd5e9804b6571aaa1d57777a059f080d47c6f38ecc9dd1d1f1a593107c91d9f24eb60f5e22b58ba06e7f032e4d66ba8fa89ae8eb539d0ed255409b0d4b0a3681f1dc4f5a519211c8efc83186a8847c9913756ab7d0614fc275320f1e8312e204e6043a11af78587f65a0cf95d55d7aa10bf3b7bec83fe9ffadfd9f6c0b7d5e01213bf5a01142a4c25bbbd865480da06eeb7b7db6943d40b60b731d5857561556d87a5288ed6b575d80b60db8c0ef59c1cb3bb43c62a35da0ca1a5fdacc0f1d5b2bd59ddeb3f4411df1e5d813f78a5ca0eabafee8c7e1a0b55b8067c9f784d9910b8d22d8224b90d89c0ce85dd2c5c2882c213d7b8a3d07c512e572c8a9696e0e8e028ea45572196fd2c8185f6ba4077e87051020fc197b21b69774d6be94b6e50d7ea64ee11515ab61fc64e6f7ec85446e7c1033fdacedf6a627e3b29d8136f45ee98
f445df5680b953daffd7b625d98afafcf8b590b67aed14f8b7c50af82c8387790ea496c9ebf781ce6e31347b5b9970ee98ea734ba7f8a1a1638fee252cb38afcfd435c9acba935b15dbbac6f3197f09d4a470d9b36f3a6f27cb73d4b7e840cdc0593074bb3a07881806e614767145003acf8c8336d6827b082a23bd71fcb7665e4b4351b5b86fe7167d89fd4a93480c8807658796dcb6edf799939dad35645b23e73ace82096c83a1f92c985c85ee9c963a9b4f9e6659d3f426e00ac8bd31d9cb649f6da7a5553b2f4303697ad1bd765759aa0ed9111a7c92963d4b6f09dc60c2d71b830725b8f6c932e917a0e122372a2cb1673408a6c97ffad46e71fe989cb3f377bb9906ae8c2528a6a0bdc4bd4c2394a9b165a79b9796d769dda30a4e5e214ecac12b45912b38e367e571f2816b5be12554f19934c0e95b94532d854d8100f26d26cc4686b82e5663efda19d69c39f1f8df964f52139ddf90f5a3f1acec7f5419b1d7b5bed7601a37fa6b26d98058c60cb6da1aa740d496e08fe23222321dafc8b2ef5dafe7207a57cd2fe009eff2518b1d9dde687a687139d184723b03f6120c1ffc62fc96f243971418a10003242b9ff8c297ef036d5f014622d5e377d18eeab535043d37fda41d58656c370bcc862527040d79650060b8c54388bd4cd1b2928f1c5aef361671ed44aa181b5fc7810481d8eecdb4947c02b126fa260e3f0c65446427135c9b7d5902ed16093a750188a454d594180b968e26c21fef0984fc1be54ca52bc141d99d9f181d0691e0970dd5a28c2882085db6cf7bfd0c5573c978f938214eb2d52c499bdf9a89b8409247df73b903ce48de5f7cd945ab6be939d157cb7ca6939054940b5dfdb32248ad51323d1742e31dce87abd534bada2b7bdf2bc45927c1696eb8fa6594199014f5cd7b07b3f67c1025435518bc2aec0836925d0e94ec05ef11cd5cac5360b2ec6718f9ef989848f0578cbb0070daffd728b0ef14a95bda563173c988a81d03993b69823404ac4d56dceabf1bbbee8147675140897dc12213ce461b48218ea733f121a1322f58d0223acd6a90de5c67c10f329b5685d17d3a89c22b94f778df6d4c9ef6bac8232d22d3315e234d852ef2e610ab99fd4393da933e9cf51073b5f3d590b5ceb456b12096965598290a50749826a65e2f965cbccd3b7151e0e14489b8836cb3a5d389f42d5fb53e3677d14ac4b8bfa239f5d0412781adc3d0372c7a04aaa04b66bdc26315138da0306b387ae992e575d098e65bfb1e9939f9bd1599eb9f1f82a9e0152ae8d39c9231db5f1eec0e7eaa6816c96b0ad41fcc5c4ae38fd91fbc986fd9fbd489690e7a2bc5cb7ccb02da6bd99e6c4bc5dc5f40eb863eaae35ff9d9213c59e7885dcf6cfd59aabcede31ee3a511874
7747387ba8eb269ee0e25b265a9bbc221a064b585ffcae30cf99f47f3db6b84d1aeec9c44692ceeacca71a272e55caf2be6a35f145ce28b5b3f9d210067fed63657d6a16d9f30aecc3cc5f3a9c87f5e30fa950b8b2f3a16e5103c90cdac1098c9774c89ac73d126a10aa36fca4943fcfcaa7de8733e414f4387818c27720107babbab2cfcb6c86ffb69dec906d96f52077a5f1973846206e50ffa7b1f6df1c5b5e05a8ce7db5e86171f8378a8e9bf06f240f0484720e268f08fb19a35b48481192d56cea354cdade638f090769410659706b660bd7d2831544d06f7ab99ba58e8301544426f345571c39366d420fde95da95210d4e1201ac76b987594253b8a026ca7983ec54ffa7fe1e28aceda66bae4c239cfebad4228802d6a445a1aae432b2c2cfc203d59ba2ac771a31c00c09e84f66b8b1ba4fef1780bbde37b9145d36b9e20c013a9ea0b1663cd9e27cf2298b0c97b024a5f456c500261e3fb73d72c5747cf0bfab168d0b86af86a554cff342966fa5246f810fd0301505b26cead8f37024e8c6fe6affabba731d4211afe60b6831f2ceeee91baf42a0a657358a98cf640f4eabdb3cdb38e37e45e6420086fc74e6a0b7c049c807f4050248f3cd11e2905119288950c2d5817c1677c8af8b015e1f351841f938bff6b7f702c8c391555b99dfacf06b19ef48c67f9d07a701f6a83ea0fcda3205235b0cfe8202f735266cfd4753bf0868b2fb77d29b640731fbe17f366b43f56cfa7cdf8c87d47dbdc9ff7154b9ab83618bfc80970b8f49a946c85688965923f7da71649d15176882ce3be480a20ed60960549c04fe806646b5eb7dfb9cf95742f7bdcf91bddaac1dcf48291fb62ecb5a15303dd6ce6c21ea72bef3f683df0df65ff43e0b5da22b0555fbb0e7093ac81df61b4d0c7de6f4f98dbdfadc2a82085e0e6dca0e3979bfb16e562d76e7616305b60d3dfdb70c14d9a1ff16a729cd7400846432279f68e97c7c7bfdd362cb901153e51dd56baa81ef7159738c19452ca6bed8546d9ab03cc12bc2ab252149eef44e3551c81be3f0a446e8a00d84d210cc31abc6b750172010c6b7af7d40ac2512c655285043a888e7115b46a0eb9c4913d75e86cfa17e617272107598da589b774036cac697192c6f9e54fe53ecd1c4ed1c3d3348ecccb0340582ece994cfa7544e5cbaa41051ee4ccb5d6d762e812b7d5cf8b7e76d6300ba211c48265521fdab33cd858dabc97167a0bdb78f26637740407398b7fcb78b9d435bcafb3b379fe3e5411036f48703ec672e34b8e85ea1e163f8fdeb146610cd93620ccfbfc85ae4bfdfd1ecdddc5f3c8e0fc5630ec43bfbf6708028696dfa94200db11cbdfae810e3d55ca1f855a1d9b98377b54aeaed3156391614ff1415146cae45c3f4c0e3f377
f90d7e60c28c34e958c66ac091a66494a9cb1dcedfa5cb6eaa38f062664f113c3491e28606088d11e446a8cc41070298927be4c68a85cae340ee34af8e008bd1e2516534aaa3883a6f986ca290ce83bd717eaddf95888e66455619cdacc38912ffd9a44b0b072709705ff5c70ddc556f31d9e6c2b99e59bfe0272a336ec7914da06f957f27c378a6c563acf94ef5a91fa747893f2383b1ab848778abce8dc0a2b42172a7fb91d3cd8312aa98d155b5d9f13be2e5545b3d9bb3050c2447aa007575ff9aec56517c977752a13b199d82e299b253e708a21c1dd27d6659c7d1a724642b142c013122f6dde657cec6db586553b72935463668fad9ae186996ef38fdf6fde5aa8c4a7f073521f3cc2bbd9e33c4928a6a4726ce3a560659a41293701dc275c39eec03d57a359ea48ebede800e9208f4dbfb377293efad76fd92a7e6dedf8a530149879a8e50627059046c0c90e886e1790d5354e48fd8dc621596c55063784bb35c34ad25eaa8cb23a361091d4971ef7bad1cd83e23d9e40ffeb5a70bd0eeab408b4a427f6304b2ce806fdb8ed7b7e8c2102f66bda076abba771cdba1d0f5d7d456779ce8bef20929972e7417e6579a74d50245e5cbed2f04bfc519ec77c6234fb8c257515e29d1edb96ba6e551487d36a0cded60da9fa7a843f62599a1f6114afbe43b72ded08043b37e4dcef5a7802de1596ab457049a003aec15d0fa943aa0578db81492775c046217a204204ade6b158bc7c4b7bc33e771e49b9ceb385f3b6d11acd8d1c209d4b571b90174231bdbf9e4aff370b46a1e80398d19fb0399980852005063035090e408d1296d28e85dc58e90c7b68196b54faed1a22c5cc356e99822825049239b3002b946218e67841c61cb008e59c891481218da34708d17d6896fce941b7b3084a481c226827fdfe19e4c7455a6a2cedf205affaa821833b63855eb5964b074d4b475196abf9c592d54bd98cbff0d925fb36333b5e7d4fc87ba654cee389c47ac9cdf1673898e71ec8b23520386b0b0f6f9b807204d85d772383ab5d24526d6c57c87c5e2d566b58fc8f8058b38064153ab6c3a50f5f06ea4764ab9d606f6377e7a2c7412efecb71a4cb634cce3a63bf5359e8814ed77ddc6b4a40fc204d9af66876d06ecd2e40f9366c1185eaf88108b40b6f0f57f5ae175f5d46beb5036134f474cafbe8f4ef2260df5ba2da17bacd1acbbcd64ddc03746ba3e928cd0afcfa2939a8d6f11924528922d9f10ef8a312f69d8dabd8cc20de9b7fe58aaea75af2de6dc862940564c61c19754187341caa90ec381760e1c4810d8e771335ea5cb1a5c5e8e558af3069660d79e1ad66e30ac224cfab91a1bad9a4e9ea244b7aa1544ab11d44a25787bf9532f5c319e6e34f803929fa09cac811af97405
48b28ed6dc4cdfac9616072f317cb0dbe1118488ab770d8576f090e58f5dfb6288f42afdfda46ed59277f7e06823e5ea03e2d414a79f454d86db3e322e1350ea893dec7099b1ecf696a4a3fb89d359fe29ab620529dc6a7dc5d632b696de9f333f506f84bca9fd7f66c936398123c12073a51e335ef3a0bf6b80055a551bd036177b7838aa07680e7f92556853232ef151caad7d7b09b9c7dc04925a835e1a4683cac902eeea199831cc929753e8c3f00114ed86bfa35e9ee4371be2bf58d159b93fbb24933a7815bb02712340282141d259a2deb9a1788ddfe75b85568bebf1452dbc3ee892c8d824cf01005c143a6cea5ccdd9a8c70e9e1b8e832bde53a109fd498af345eebeea6e5334e304cb584370ae3417a4a465ef262bbd64c2e5f6696a59991b4e9bb50a7381be46d0adc35e67276e1bcf3d34b781f788bdc0e3f6f5f21c246246eeeb28ffe8e57dd2fd09088bb2771e9c39b56726b036ee0f8fad7fa03ab203db69d4cc332b60d74e967f245f20b3d53774b795ce491603ef7af90021219ab199480c9edcafce4ff364a1e88f97350299bca866791bf4a61fa014d7d2dee73f62d8a4dd890abc2b6e781f946316a65aaa83dd774b942ff8badc448d7cfa5e9bbcd6d30af77496d69fb684e6c5a06e8e90815da8943277b2b9c05d112e3b70393cb8e0980edbfe6bd772feadb2ad553ac867b595959b3f078d0e89183854b41b4770db8a79649c5e4340bb205fb32822d4115434459be3101fb6ca248d6254149a414e482b6f602931c9e3a8ad09bd5db8def3c766799423d12e9d3af353b1861f9dccc899694c256e7eb2403edcd4c3b1dbac2a5677b44ce17dcd2995d8223608a7b6ea41bade82b2b195675775f2610ebce08eb81bc4ea253467a9f76e2701555266147b79f031ac9418b8b60433299bb850f4e463b446136b33fb58fc684de519142b477e32d11c3a41041edba421c1620f43a6813020d1d33cda7df23d2a8c24b7bd7772149b8bcb3fb28e53c0195696a648c0a7272ccdc2c78b3b0bd52b61dace9f79755df413efcfbb1ff1d5491b5010dd2dc6241c7c9b541ad5cb251b0e8e0c432de389aed2db2d339a84afc67594e460dda83c019fee3e1eb85591d0f036a09addb34b852a7bf79c21e5592fe0cdf1c2420e73293a2832b2ac6e734834cdfaada0265071f96aee6b10662747f79a4f96f51c8a6049afcc43da5b766dea3c42378dd4994817ad9ba561fbea39dc16f8f2766bc8f4532ceef2fb54e879c403e6083b757a6c6055a22c035cdb49ecba97c448263dcc33f509c5c0cbc2e65430b8f32779ba737bd292d77fb5797989090b7e126f67c321f23d671d5b80911e32ca2cdb23bc79596c9b33e5c2941eeb7b7931687f94a159b72a93142774147ddbc71f
6ae38ff359bacfebd6aa33676c46703790243089ffc240a7649d4e36216948e3e9ddbb71541b022b640f90253064f0f27276cb93193b54466fe1df047752677dc29364f4bd97793a6735752f97750206c10f60316b3f8d9b9466412e997247ce9d9bc0e8754ef754110736052404af720c7f1fdd7017150160b35b6a2640b906d58475de9060c4b7bc314722e967369ac9a34cda947e542d7921a81a8d5083934e92af91841256e9b82930ca87ee8640c7ae66c5a3c3fdab86dcda30e079976d22a3cf56f9c156559828b8f7334bab35b654f018f331dca72dbbde50541b951d543284d49d3b96f1f2da217632c5e3660283d27904a56e4c83b4cac7675134b8745cdefa91c16905e16d958b0dcb29b5d52020ed7de8cab9dd8ddbe2cc6c44ed652193041e66b8ad5b86c6de0b905daaf17ff243ef2b21991da5b8a18c47f5835c405b13017d9a23fbf126c53bb2777a6d6681715da504698dbdb9d35500a9a11a4d1a41e1890c5aa928f33268c21d8f46a51abdc740aa3923f6b6e1b689af06a5cd1b983ab6111d9f89197e96accd56b7923fdcabc93f044a05902afbe293e4e17bceea5769574fc49020f2ada228b29f86e86439101eec7d551a882c8260d2798a5464341fc08b03f2a1e8d1420b00aeb0629742061b9f53ca9aafe67f06fd0e596b2f47f6197de46646ab70adbd7b200b375934a19280eb3be89c573b2b44ec0c4239f54bb158f23c06b65dd3c5c5d16754e0bc66e647409c38bad393e301cb6c511d3d4194c815b6ee562e0b3e5b1eb54782aa91a9b5957e61c9bbf37f51605e858549b4f1858455860c5ebf685d8f1c864f881e4b7e4b73cf866beeff7232c6d5c08c9b89ec81a9b0337545f8115c4fe44dd6f6147e6cb7788f7e3c1feba373da09f42d731522c32785653d33ff195bfd89f0c06892919b3a90b2ba324d0cadae87dc7d2f795e9a33abd500cdb3ed36975add5b149376a0888e6995023bb5fa7fcbfe83556309b452a436aee6e7d7da5242f03c43630c3bff362adec8ec4974e330567e81311d2732d20ec8e01c497b5fc6ac43f50208e9e11094e2b44a07dbb975e43f0eb0dcf8e9ae67391d3770db07f06166c585915d12f618a50f2c952ca38edf636caa511bdc0e70cf567fd7ab6d1280e5650f6dd4fb2e9951a012d5e2fa22a90e7e0d29637d00a0226cb13c1a90c6a7989063d314c91a4da4c65ac011231320ad2fc335fe7b728c139e58682a8aa4d296c0257234e89a8e6890ef8d982c244fb8f7b391167fa3bd52cd13704de96aab9589f7ce03532fa1b23618fd29d5d96a7ec7c43f2db64562ec1213e364f50a021c954c24f64b84332b38aff3d5bf23a2be97d23f7740a072d0e248993143a9d28afb08a577eabced00f638d953ddecd6bc8c54
deae66d5d9d61f3ba20c15961270ac5e331437306200e89a2c1ba8b4748280d8092488258e7a006d2e5c47a83e02b71655ad8dc8ce9d7ae0302ca9ce3ad281fc83f71efb0f38afc85f9a02c024ea521b09aa7cb9fa6ec18b352d069adb122513b6a5c976004fa08ac546aaacad730c128bb2233b504640eb977697238304a8867e07ad5452827ce5c57f688d69f8b637231bbba36b435b4c2eb54122e20a256b316fe0b9f09ba49c6e890f1c125b3b0a54055fadf566b4116bda77d1c1f54f6179470dbc0f18a750471e97b4df70e1f7c8d65ddc7724ecde9cd8d540ecec35ea698f3234ae9cf110fbbcd70ae1e8c085abea9b8eecc97783ea2e645e696d4d62c47a714fea6c527ce28b679ab060070899611feea7a2dbea36a5e5cb0db1cc0d1954377b0b1a80063c510ef3435f8ab3c0611712212a0d3dc09efef11a5c9b2b6427913b993a6429e5dfa64530c7a064292300ac582663714d24541cbafa772c2ecfd224650ec4149d7f095af599fb9abe4ed606a3c9a01fd6fdfb8a39152dad05d896a3aa98b9b6be9484de85057d8f0a9fd38382f0a1b18ab6fe8e5feb15248b2852d4bcc5381d228f3fb833720ba581518d3ed51b7a7cbe1f1c883de8c0deaa92f34c54c14dccb5a333f84fb444fd93d9219be03045ea966a61a6fa74690c9dedfeea1ae26cc89c1e2352f9af665672f5c7ed01f12b6ae5a71563b2416fc1498d604aa8bfbefd0210eb2fa619d365dc507c15ff8a80e63e106902633f6625cb7f3a16aae1f77c8aa40d1dbe2f239219e19b2e238754b267c5c32b410592fd969ec8638dcc6c878f681999709a177ebfc6380a1edb6d88b18a8aad25cf4c1afca087d2c7297af4135394ebefb8d48785f045e894039845f0403969c4de08195ba4a280e664caa381bd709d7c9eec0f1bfe41f1569f31c57f98e3a51a1edc09c08334f181dfb582cb2c3abbf497508112620ec29490991316dce02458f3721ae0d09ac121586d5f3a83432bfd77c5487cc4cd6879f7de8737f10ca55ad7660dc66cb1060500cf5e9c0af985918520d774c51dbf55b310928707cde6b80ed4c2d5280ce9427bc3ce0778bf9286567bc07b784fe362188192180b0c726f73981741cae69be102d6fa51ded9d1d07d8f1c6ccf34e1e8041d3285d7a2e1ffab2fd909b96cad746c8c9583cbd611f00e4ed72c2d9b11db8c8f8a185b507299d124f35e17047e759729e284ca3f5c0706c7a4aea95cb944c0279106b33fb36c559a9cebb9011ec6532821684fcca72e59f709d986f69a8bce0c1567e8a2c3c239bd6f4688babbb14301ba31653cc4665540195d04a7b97adb80a8301807a2f71f3b296586ed84259dbecb51543e2d47814a6163f25c4250694560c0f6bd13a0d14137949bd3942aabee89
af46cd1b044fae3eb930a1bfce61e7c0612211d38ec068089a90c4e8409c4a0ec5aad74c259357d9f6da5777d6d210cdf9fb64ab98f6efaeace77f23a73a5c5a333680c69e87460f55b1bec27fbc0173c3a491c1b82fa62a90399d7154a4540e96a7054dc34e6413ceb9e294344f2acefc9107306f49da2204f3ea2d3a38f8e9ff0a88eb2807a00c2c0b9e01cd1a95bcc825572be38f33c77fc2e9b82af06e15c3b5fbc0c4c343e50dafd03180f1975357ed2d1eedf46a947a2a4209051ed388a54477abcb3a4a36808e4e3aa725822689f861e2deba71abdcabc30af4fe2e644b11f8eb3dba80cf7b3fbf9eb7f5d378c42397cde37b33f4029548b6dbe0e9c0c87dc3bf8d516fd20b510be907bd560ba1e60f4e36a8ac3d5b34218ceac208dd732706030a712d38de4857358e94ea547688de11de94de4247fc9e50018e1d93f53e29b360673089e92c250453f0ff13e6e117ce94250076ca5e4efcb5c9f30a6fc123f470ece1f640ba2bd4cebdc6856e6e9d0a2a4d51c48f12fafef263ea83dd64fd575692ce6caf6536120393b349436cf782839950db0366b2f935f509e45ced95961f6537c461daed9f8c1d1ccfca06965ce7b578a8fca69166d700e7c5b8646b9f6cd92d88856ecb9bc70559e86dce5f8d1db1707d3cfcbff2bb7b49b189aea939f994061ae816d5016989eb581451798fb22dce881832df99e472ed1b22339b5c4d3a2f7e904702d03ac3685ab7a8d354610e846c07074af671040f9aa72f533f064a27118cb01a46817dc3997988c4a18ae0e0ead047ad1e32825f0e6393951bab272e41d94bbc47f4657b8fb65c1bed5c6cf73cbf3479c0bfcc5196af8bc3b99fb18dfdde33f2c438cf00c29e0a3481010f7348aee0894d1cd0223956ad0ad94fc70469af646c66ad124f1f7f8f98e7d49fb16df47c538e2c08e1264f4e404edddfa5cbdd5efb24b8f39164bdee556930951e1ae491608eeec5dd8a644cab8f60cb51a3ba144d1189b2a4778da5a7890888ff7d67f41195feb8ac09071403687776963000a253b3d29adfc97ab7ba383fff162b0e612b4e2c8f29e9249b7da9aa24c45a051ad1a860a7b440a0e32e579920d68c4220f7415caf8171043b87390baf3faba39fdd606567b6be277d6d2225ead573ac7b662d8f8350d2accd5efaedf0a3892379e9c33752a908c4e8353c83350c3e9901fe74bfa688d776ff6022a5212451fc40883182f03a97ffc776fff4cd9255837c86e90fbf58070b77c5ef94c8c7be4d610722aaca275ee4b9d099f2516706dd3d2e4dd1b790c35a703909fabb2c0d4e405bfee8d795e0a4cc7e64f5adc7a63d5164b195dd2eb96f64dc501916dfb83db3c7cc6583e4ba71e1c042a9fd7e749279c282ed1667f93de0921052cbb7f5
40f0b3cebba625084b8fafb1cbe43a377efa6e9d423dfe1ec135b5989ce33b5a35454a337e3ab1a2894f531ffd41b758fa33ed4d81da600084666b7d308928d39d57a4fc5f19959928e2b03473eca7f85e8ac4813ae0084067d9777a015715f5bd52a5e6bdeb47d89c194a01037ecbfd749c81dc6f3882b2b146770fc2c6f4696bd3367de7a344d6b2cff7087159176bf8daf3f19b39304b7e1158eb406b05bc032a3102ca08ef31b0c76c7537b60e8617ecfe1cca51127c3000d092bb871f520f027cf9e3ecc7c309bec05243c83566866b8463eaba69ee315c97011a1faad7fe85634d8a02e98c60148e0290cbc5f129bf4a4fb125ee7f73c0a1543d799cf9b78f507d0a2d243b40c574dad05cbb562a5265c7e9b222c49a66619b736447f0804bacdd141456051e51d00752c417a1e43e963b81632b0d2f55d63814f441cb13f7b4de940618773c8e8793a46ad675042bf70a1a855826f35c3ca70b87f294b21b0f7f8fe518ff659654cd88a644ba0122bb7310da8292d7e248a5b6333f2bf518e185230d8c0eb552c0863c912cecd8c0b09e5dd12755956d655dca984ff8f58f27a09abfe2dbd04228149e4bb651dcb3e5c20995177c4e041db0e5987f05cf5373241f8589357630789f4beecf54990c575ee19e44d102cbe196e9b216598534bcbba914e5b9a9db58746d31183641842e627e632ae64b68ea00bf1028b28c72692b0080e7bfdf75ebe1ad250d982afb13e0eff041ae6dea39bd81c73dff596ae04e29ab13e510524e722e32a372b94b160c62ec1874dbc6f6b78dc6060c3ee2f621c8bbfedea0307578eb17529944596d52dc9cf3ce36a23489f1f6ac8e9f875640c87277067d226467554a246446ecc26839c062f167504a18f84bc277aa36c614456cd8e0f3ae3950793b9d0338b756b275b999be099882319d49996d670bccab251c316d07a258291731841c8d4934f5e24f518e625872d38a50f60de3cf9208164a18666c6446c01e07109a3b0c069a260c15fe25c92ac258f35cce04beadd571084fe6cd7b69ff6e3060a69543e3c3487b3624989bdcfafd05946e9358c2e1ed74ecee9559bdc96999ef308348d3d30b3b4bed56618d2ab85265575fbf57cf04e9727ebd4047b619e820f7392f794cbae73820f1a4dddb39beef22cfaa43e0bea8b69edeacac9f1ec94963f97e847037729dc41087632b491a497abf2cce87d3c8bfc1e1839ed3ca39aa07c38bdd0a716082ed14b4215107785393c572f2681b960944a5bd236976dd64033ef413be6096e44535e5f80f82388353d12094201cafafa91b5376c85949a3887b43b3230f11019f070f5ac8721f5ee403a431d2335d61303392077d836519df7e9ef24f934e1cb280f3317024d29086fe92be30717e9efb
a8d6eb07c41fd3a6dea96187de1d2ffdb63b04d48f67772b11a6cb34481ef41f990599e1e86752375526784a3c1f4d97b331c1fe66e325978ac95fcadee4000884a771d48dc3ce951a2c9e2f40eee6419948a0941d2743a3c5fb951c0a6770d012693dda2aa872fb47f22567cae362b4828dba8a8347fdcaf136574553e7c3b6794d13d19d2244139fb9732c6ddced4d823e4cf055d3d8c1f73915e837a6e0149255ca89dfbc9ba0e2bd03714a494b0877276524de61b26bac8cd51f55880bcce5289638aa7ec1bf01ff981c5a02bf6268aae8de093187c876595bdddbe1d85efae6bf8f0d6bdb8f1ca6a04489516d637c497ff04eb10b98231b11ae7b841709efb0c84fcfb9b45b50f9090bf4330d72adac441dcbeba3a05922cfa9661cda0c0213044a8f76f71e401800d733fc496316faa2ed4300e38e5d06aba03c22a60e319e59433fc794cf41555549896b7375cfed93906240286b97a608db0049c46d159d1af37c8bcde7f21d3595ad168f9e0e1a4808d3594d66b7abc82f1a6b26a1020d41c3c514e0f74c90e6e8f90923f97b1d1c68e2d5014b4c39e1f141481afe00ef0d661d9e7b7b406a05b27cf1c199b21cc989ecc0f11283d59dbefa13b3b3e42f316cb65b9fe685b3990a502e998c208bcda8bb6e2b005bb3f41b0fdc981949b0333280dd8154f4a58055e287ce4692ce17f75fe4e4363a687768368ff79d60856a1426546d91d501cb68528ad4bdba7c444cdd66926da18092a8eb2ab989029a03e8294bd288b07716737a010385f3788777be8fcc15a8a003f27eb855f7807b78948ee62546c22e2a9d9e986e011e942969c9561c6d3bed28b5f1ea2ed708af3a4b9d8442527d683f2af2ff0f19f5cdddd23702dac08cd3604ce8850c24134eaa883cb39a9d6f3eb2e0975201c806f7c8b237abad30f6261704960b0c859ad26d696e5ea9ec129890ae7b90110902c4ed91aa29563ee9b07e73534f2a48e1a48881cef1bf1a597006b778cb1861f364749880806355d95898cf1a622e5d323452b576741db3cd4ea3c3502fab211755a686cac6ed4479c67636b331c81e84072f6c510e9f42cd50bdf80c1bcb5fbe3a7bb1715afbf89443c08bbb2680588ae67a442e16d4677e345b0509a61d30e5f93414f4b535ecf4797f4ac1b9f2c66d13963eb36bd789c6fc90e79c9021e057df0b6a28c2febef82ca6247a6fac2ef981f2cb13e4ebd5273ff0155a173c85b7525a0401d00a5294f11e295b9d9230c20aee4cc8e17135e5a6484eb43a9a2dff0e9a776ca3bbc0116b24a1230e20a21ad442c2c0e9cf23227d9c74a87485e223d01bcebb5584eccd173f572f00c03187bf0b3a6f01da929040165ddf2da37cc04baab1ab65dea89e4ec45d18c78f6feecb33be606e3c72
4d914ebb81d9b2e4b21657a0c2d80e0ce3d66f9b5e2a090804a17c487107aac548c011379e2c14fa63c08dc42f95d673da6fbb601f56764bc54741ae4c99a753df9e472768ddac6e05de8de89a6450eacfd96b1f86658e733f5f9c3fb0a48babc6a474b17d518101cacc59e42dd0d22eb29fb9c9f281861dc9e38d2162b2ddad5af5a4b0882f7fb78bc431ad08eece35fa3d6fdb82b01630edd437613fd7200ffd262fa3c30daa5d9eb6746d421cf00b4633a3d869f33794a0144f0dd84a1dff5c0622ca345b0a2938040e70be33ed44c4f43c4addbe087942343fb326450946880b253a96e9691cc2dc6f1d67164887f36be09464cf65b571c123880b400fb6992b533e1f54f939da2f00d4ef68da75ad8f13d4b132e72ecf97752d3a6a46dfe7a1a17e533c0554fbd4296fed20664789062779a328ac65173ec7ded581d4710fda8d2aaf6e8a4a3f302722859caeeccf30f93218352641c32ba67dffd90838e36a735fe319b9b3237a9d3a1aaa332131db6ebc98e051d02d9593e1fd831a457465f2f01c7ff18f0f4819a6527c270fb83a6bcb0478e1bf2287dc1c43c61cf8bc241afd41841373e564d81f1c4ba420a6e71b068bfa6c501ed4d9e9d2be32e6821248821afb7a41a9f9152254ecb53c15cb2b453db2624ea39c534318f8d82227046c4c221d8c3c72ef47928ac3574f1df75d8b9e706df6d71208cb1f1e2b02daf03b5c40d0bcecaedd01961f1c529c76e87eda4361f14790ab7394804094ffe72a12f43514761fb8626b6efa2038d208beae9b3741e2f7f89199ce19521beaf5dfab65cd1a996d9235cb0c2d1a317270c3066a5a7e10c5c07a8517de954dad4dd801eda0c56201bdb05d5dd2ab9d5c714dd2c681ad80cde51ce503056defda8916a91b6223312fce40a2efdfa673af7153d26dd00d0f8f9548d864718e37ae77792f0b2f1ebd15536e2eab78611f5731947debe7f2865bbc12ed2df4c6b979a6eec953cafded230dcee32757228e93b7464ff4d70ce26dbe9f36f063584a5bfe611eac3aada03446a496d66353b62da3732b94e999699e123f04cdc52e7435e27f531422ecac8f975e050e04e402a8b044d5c138d43bdbc529c2bd33cc5d289dec0267a15834e25f8186fff7ff7b28b666d89a9c147e91ff11954bd7ad58dd0a9b64d8d62cef3b1338c793fefef81864607cefbf42345e1929170aecf64d76429d8b9c5d46d4f810b44177c9b49ce51084118516150c6eee2313f668ddaa7bc95ce7427cb7876282ad0e5dd8c7ea32272866dace65fbfd784964de578b9ad3a4cfad36202f0c4a9e21a20817764b4f8d51716c1a53da492be3eeec89308c0f282c9dda08d451b0bec08925414d7de85baf1a25b2df1e064841b8e3b1a54bbbbd28ac2a472ca53b
3a562de916208b2e5fd4e266b4e5c871303538ded890008d06eadbb72eee48acd9202c8d842c6b8058f2869182b352f1eda0744a70a84206643f81f3a3ff88cd8fa20c673f2d556cb5bc4c2f8b73cab62f5e95b1c86115eafb0798ec26b69b9143556c28d601d3df060a7f66d7a04f86d643bcb5b2203461b45468c410d0d8ac00a4dfb7f152fe19b7249e96f7e95f26b3171de6dcbd60f948ecb02940eaf5ef7b0f174a4c9c6369aa5acddbdcfcfc7d10dbbd2bb2a37ec89f0a3678f0e13afbba2e93d6ca70262c61be1984cddba702a9b03249ba78f10e42238147473d46c4d83f6267e3b9465feb61709cceafd55b2e40d0f7f6dec8872bc3ce5d76bd95c74bca04acd83553fd8ef3de43c7ab75c563b92b4d1d4574ff1bfc30ca7cff34990827a415544dc3aee8dd4fa829fdc73a90e417be10d0f3b4940dfeaa1b63e342124a549632774e1f8b4ea8551ca0c5db23af8f9d4904b1c856fb1af5b1765e6e10c50f501cec51251ba43065460c4c28c62c6e78c608020d05908569069e923f62f4df9968c5e8cc85ddd1a24b3dfa5bcfda1c63f5c6a5586bcb442edb8477aed80beb98f41199bbbc651faf831da597952b2dcf9827fdcfd7ab73501c12d48290cfa42d54a9feeec882aac8be93cefb68b7df376f08ca181245c161f80f20aa18ed333545a7112d80bd13f7288117ba293e2dbdb89af2a360e95bf9104cf752a5ac0332af003fe04c415958454fd420ae8af4a590a891295c916a1b5ca1575fc907f8ab95b1f3475dbc483f4c4633817c248f7faf20f3e0179497d771d10f982113a7384c0da912e5208c14b3b1b747a43db2bc08f902eb9f0e8ac70ff416041e8f34b463ba0d63ae6e80d7835f3befc3e70f9df6c5384c82313ea953d482c3dc26e75f8e73f6b993b44f20b829b8a60cba964175088d874e762dee989a97e071d36bd3fe1054644594db29ad2dad2daf6cc9e98ce72f6b777246fbff8423431322d63dcc6746b32230ddab0e24699c3e729e81f630689f5683f987144eeba2c4767b615dd1027faa69ec220499be0218e66f0c7b9c97fc10716c60c47ddf165d4dee1157e93dd49568d1f930b9a86d02f7b30ef4fc98ae9fdcf2b0e38a40b6f10741c5ec1303c6d324e178b4d502328f6b4702413f29466fc7774d3a64a0770b39caec7e594e37a6a10f925e3a000c50da50d4fd56b24327b2ad8d9ba29aeb4cc5ada1cac2850c0cfbae38774dc3ba2e27f2ed71abc582da19b0bc57a6281241a548895f81ed1180a2b24499a02b67bea809269ac68b0830db4fb8be4bfcc87e278142f189c71069a48e3924e8ab44e32d2f8eef816b59591175c426a1ef5c7959bc219f895a80640512324fb81b8732cdcdde03ff0a6af260ea81f5379f7badac712b9822bd66
f749473fd7b863f3cf1f04d8bb9607e471c0751eac01fc8c93bfeb3325c38b5dd370977fa1633fcc790722693b779ca576136e7423e4e5979bfd8e14905f9b5357af7693cc4d8c5a84bc0649ee9241f3814513b77f950bd3728d2377e543349a1a90d78e282350b331bf9de827780a6d63674993168a8d8a1fa759cda1e6c0afe0be05e14cda543f5256fae8c1dbedfddd1e5ad8662a35f9baaf8858965084f64d6fde2d48008c0d949c02a1e01bfac4ef2d96d83e45c4e72dc00ee043db90d055f918a1c0dae749391fd432e898f19629229829e56380ccdc208b49bcdb6606383ba481bc4be2187af221d0f8579148dd66dd68041bf92e3ff078df9c5df6164129ac31e21238169b163410e2a98163a1523cf3cc1cecfbd5ca7684ab88025e11abc05fe8c52ff68441efff3714e54d9e0417a1425c5f4f1d0e0dfa1868863ca918fec1f5a8e0b21f0ae749fb9295f85c25a4e289d8c0ac32d291f36e0e40ac6d92a227b2b70b8435fecb064531b84263e9dcbf45cdef9cb147f8a87608772e51b4ba2d86978e4102291c858de0eb0a51068d2dbf2ec3298bb593906f83b36484a9ef0019b156abc13a7cf84f7b36199315d609bbca4820beacbf30395b560809ea6acb24e546da890080cee950df19042d6f58acf0d3d0bfb51f02a8e54f76205ff34e670aa75199fc8b4e9765864d8f5601b51f3dc06c9734b322ac4bb5fc4f7059f3ed147169201209b4ea02cf358b5b0ddd6c9890096325e581a5db36dc3225a00ca4be484d58efbb25e3fe537e96e916f0cfca18b8b48bb3f6d9e9243bde52698027d1ae1c050722cca64043a77b68a9b0221b1e02a1eae592186e858d433861679045e6db6f68d3c3dac81a0eb4cf1033329bfb294e4d0d823857c6bc88770f11fb0d1004a410fcd26a5411acb8d85a0c3c188d3ed77c13f7c65c0afb93429da02d6b082c766a46165b91b8a44573e68cd4617bbe673528cd09399890af88c50ff54bfebdcfeda611a7cc5f48f0a67a66ffa9e812e8d9c110309ee9c8a11c9694317b00abb81279ec2f5dd5f52c0f17ea50b92253da2d62318daac6fce4f183302fd9241c03c4cf599d8689bc6afa7257b6e445ed97879daf3ea4fc54cbe441b39838a4449747d1f6c1b5263aa19e4288007888b795549a9f8f68def34ec583a3f88e1c0b9a9b5204a6caab1af4e0207024276fbc31a015e9c86d723dd032d5a57f4064823ab5afedc2e461fb8bbd78402ad3b9d9cbc01afb755b63aa9b0685fc540a24d669d3279fed66609863606aa934fbf0ff767a5586dcbf9e5c0f947ca68d524e36989ed2fd504ad21a3164b7a61bb64a2e761cd81d3524fa7c41915d1620cfa5f9c583be3e1ac14c0c82635c755b954e9ab017a25d35d275b8848051fa154620b5
e9db540a9baf6f034b0eed0d88a423a41da50d769467bff952541dcddf5d8fa8b4b4f97fa4634fa1118712eae5c2fcbd9d6762ad60f909f087e4ecf7a97a1d2e885e720ca8bbecb04500277ce1ae37882d034de86b3e6870377889b4a82e83b29b05438b6b2a3811403da177744347832ed885bdb25cbe37ee9d092a2fd0df58e064fb42969ff20c39e915fb4af981a28d032350d3c23f8259a5d7bcdb7544f7d6a815bb0f822e720cf6087d0dc5627b4d2f01d074ef50d6fb5f17794bfd07c446bfa367376aa44b801eb7e30bc5107fccc546c16c52b06e76c4830ab2f8daba95bb0f3a6a5991c267608b0e37a66099e2dc6f24f75b9fb4b3edb65d3a4d9850d487323da40e7cf87da2a7580f0b7da764eb92b9a3053e7e55f0d6f45ceb0333e8050ea6f8b7954bc22075fd3b619a6b09594a0be2aa94da2fadc9cf8e86bd4845a1ae32e571d589e4f59b3edcbcb106dadd32c21c8c476261b7396c924317ed821f4b414d83777a4a5d0db38fdafe99febdf68376631289ba334bf7da9d19ad3999e41348da8bc95b294beaef0e933914029e5e7b8f1ec3086f0dd1f8303f8f6620bbd6c5bae29356e61a1b2b8c8f368547373f4205c3c989baa78e1191da15c4e48c5e98b0d94164613c248516aad49b172d6d0e7afe12d81358cfc0359719e12fb76f36c9629419071511329e0fb9fe544cbec302eb6b7f73fa226f7fd7c662cd248e8cc46a3bd640b9c9677a77947f545cc44a054192a1f7641729f9eed9e9188abe61fabc1e8aa984dfd779d6b0b93f78a089d8187634315aaffabb80e19a1cb8cd15d160937ec91c1097ebf119a296b07485eaf9885760f5deb6a389f534b2679322156b7444a4e8bb34f4ec203156495336766009bec24c2b26acae5894f30025b7a9d28d344f23e274e36071e125121471b585dd1bf65f6b19acec147a948be300cc42b07460aa6bb1a29233fd085fdfe7e15a442c1e6f1af8c9739c54709b70683649e7363fd6bd9d00bbe7846c948ea9eb072eee4fac07a2efee6d7392062ed28643e6c56ee1d021277f08921fe59ee61e07ccf28e5d87a7a6aa514a69196669e600fddc0d51f6da190ecb56a2387fc21b7cfddaa01d4f4f9b0cbb3bcba19fc57439af6aa4bfe9cd2da0fe1d6287f7558ba05adfcb2896e877daaa1635b56cff8e3071da2ce3adf307226a7e2c88f33cf485a9eecbf27b4e986ddfa67418534a8f116a93682f414dff1c458c4f1ba3f342a9228549bfd603078d07061da145dde4ee6021d76bd6df943e5c8ac0a761803387b46a5c166277d4e5a67698608a76045bf4cb81c821f9133a511efc687362d32435f0f2ea6e5cfb24dd2aa201439c7c5cf30782e1b5e16260679ef77bd7abd4b78f153ec69d2e054c520c5d5031809c781e
1799705561ace4679dcf33875c334f8a62a9ea0a379c83dbc0dd8359174403e241f19d70abf4b7381810f63462d779543b8f37cb344bd28906139395e8d568fc3cc2a12c77caca902c0aa0a07fd001042182f0332b3812d65ceaeddcbe3f41cade320cc6f15fc245895c5c4f820372155a785f488e54f8d0453d1094448d61540bb8c629a5295156095b036b61c9d6f631fb65293ec16e46d69eec2b025b82b4daecaaef5999df5c99849f07537e31d2bf1d4b70dda3b7cd651b04a0c334e3f05b02dbf6a6868f26d6e9ce254c583653932a3979e58c023582e976cd8185e491d10a2a57d6504204ea005dd8599f3427fde2b688c9d06f6de7e9425b8976b21e9de305f600807b0c1b09148277741a9ab9b96618d03a1d457af7dd7bcfb1afbe8f0b4716a660c8e3b8b33332ddf043857b594fa841dcecd3ebb3bbc9ca310c541ed583bb94ee37875d405b12bd0453546cf0601569238791ec898521e3ef8de5b4156aef5502fddfba88b723561b5b791e024ce65dd3a888b6abaf6550e579a73a5f88537d6966ad4232f9606412c863239ca8f0a070df60b32739dde141fe1f14ac9fd402e0d1702fb1796712c54803fa4f979b12afd0b63bd8bef38813c0c651e0503f058d2a8120249337ba7b09e89e736d3b2ef11b87c117def03a1ce67709a756426e5afe91fec17929e4e8905a058a6131ee3ae14e58f63a81d6f18d54cf9aaf0028eda6c065ebbfe0eeea5b2b8d88da14046847a58c9f4825f82c9af4893184726824c867e0aba615b6344529319e11c3396a42d97705dd31555af7e05b40637c033025ae1d82230f3edda382c482c9adb4a91e42d7196dd463b48fbeecc895a6fe6429bd12002716aa10424645adebe512c6314cecef677cfe6c62d8c2d106d7ed5ce92dcd55e80a3c766f32d1b9b288a63cfdf868cb632914a464646252839312772694e9220bcd748f033830f82a25bc2f3b0e9d86fd78bf37b7beab07f55d0b425b0a9b9842b08de52828a3cbfca156f44c14e9fb6a45be40145b1d76c48ec1bef40d022d7875e76ea550376b8ab110f1d4c460a005ab40b49484cbc985aeb9e6418bbb67bf4b4c39e882ba91084de9662c2ded559e4ecd5126bacd38afe57c017f70ae984432ad57481ef9940e79950051c9bcf9691703fb67a984538563a820783eb0b51a95ebedca9893132a8268935ace2cc95380f02b8c155874007c2182b4a48b18cd622b998bb371e181f37d16b4ee5437ecd419c865900b549a14b9b3ae03399b324458ddd96a1e9d163c8532e19f64e778ac71d8c8ac6072396c5a5d9337704fd15ffb8407cb22b914033fe85382d04f9899b2d13091e03cc4cab0bf1e84f6767c0e29ecaea8f5e9d0cf07be7fc715eb8b1e44754edaa0ccc51c5cebbd29
d1349658869b160eaa225f56af4f30f0e\""; diff --git a/primitive-types/impls/serde/src/lib.rs b/primitive-types/impls/serde/src/lib.rs index 22a17114c..661ff7c0e 100644 --- a/primitive-types/impls/serde/src/lib.rs +++ b/primitive-types/impls/serde/src/lib.rs @@ -19,7 +19,10 @@ pub mod serialize; macro_rules! impl_uint_serde { ($name: ident, $len: expr) => { impl $crate::serde::Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: $crate::serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: $crate::serde::Serializer, + { let mut slice = [0u8; 2 + 2 * $len * 8]; let mut bytes = [0u8; $len * 8]; self.to_big_endian(&mut bytes); @@ -28,16 +31,19 @@ macro_rules! impl_uint_serde { } impl<'de> $crate::serde::Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result where D: $crate::serde::Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: $crate::serde::Deserializer<'de>, + { let mut bytes = [0u8; $len * 8]; let wrote = $crate::serialize::deserialize_check_len( deserializer, - $crate::serialize::ExpectedLen::Between(0, &mut bytes) + $crate::serialize::ExpectedLen::Between(0, &mut bytes), )?; Ok(bytes[0..wrote].into()) } } - } + }; } /// Add Serde serialization support to a fixed-sized hash type created by `construct_fixed_hash!`. @@ -45,21 +51,27 @@ macro_rules! impl_uint_serde { macro_rules! 
impl_fixed_hash_serde { ($name: ident, $len: expr) => { impl $crate::serde::Serialize for $name { - fn serialize(&self, serializer: S) -> Result where S: $crate::serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: $crate::serde::Serializer, + { let mut slice = [0u8; 2 + 2 * $len]; $crate::serialize::serialize_raw(&mut slice, &self.0, serializer) } } impl<'de> $crate::serde::Deserialize<'de> for $name { - fn deserialize(deserializer: D) -> Result where D: $crate::serde::Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: $crate::serde::Deserializer<'de>, + { let mut bytes = [0u8; $len]; $crate::serialize::deserialize_check_len( deserializer, - $crate::serialize::ExpectedLen::Exact(&mut bytes) + $crate::serialize::ExpectedLen::Exact(&mut bytes), )?; Ok($name(bytes)) } } - } + }; } diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 67db57b6d..59d117fd0 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -6,8 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use serde::{de, Deserializer, Serializer}; use std::fmt; -use serde::{de, Serializer, Deserializer}; static CHARS: &[u8] = b"0123456789abcdef"; @@ -37,7 +37,8 @@ fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str } /// Serializes a slice of bytes. -pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result where +pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result +where S: Serializer, { if bytes.is_empty() { @@ -48,7 +49,8 @@ pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result } /// Serializes a slice of bytes. 
-pub fn serialize(bytes: &[u8], serializer: S) -> Result where +pub fn serialize(bytes: &[u8], serializer: S) -> Result +where S: Serializer, { let mut slice = vec![0u8; (bytes.len() + 1) * 2]; @@ -58,7 +60,8 @@ pub fn serialize(bytes: &[u8], serializer: S) -> Result wher /// Serialize a slice of bytes as uint. /// /// The representation will have all leading zeros trimmed. -pub fn serialize_uint(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result where +pub fn serialize_uint(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result +where S: Serializer, { let non_zero = bytes.iter().take_while(|b| **b == 0).count(); @@ -90,7 +93,8 @@ impl<'a> fmt::Display for ExpectedLen<'a> { /// Deserialize into vector of bytes. This will allocate an O(n) intermediate /// string. -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where +pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> +where D: Deserializer<'de>, { struct Visitor; @@ -104,7 +108,7 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where fn visit_str(self, v: &str) -> Result { if !v.starts_with("0x") { - return Err(E::custom("prefix is missing")) + return Err(E::custom("prefix is missing")); } let bytes_len = v.len() - 2; @@ -119,13 +123,13 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where b'A'..=b'F' => buf |= byte - b'A' + 10, b'a'..=b'f' => buf |= byte - b'a' + 10, b'0'..=b'9' => buf |= byte - b'0', - b' '|b'\r'|b'\n'|b'\t' => { + b' ' | b'\r' | b'\n' | b'\t' => { buf >>= 4; - continue + continue; } b => { let ch = char::from(b); - return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))) + return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))); } } @@ -150,7 +154,8 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> where /// Deserialize into vector of bytes with additional size check. /// Returns number of bytes written. 
-pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) -> Result where +pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) -> Result +where D: Deserializer<'de>, { struct Visitor<'a> { @@ -166,7 +171,7 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) fn visit_str(self, v: &str) -> Result { if !v.starts_with("0x") { - return Err(E::custom("prefix is missing")) + return Err(E::custom("prefix is missing")); } let is_len_valid = match self.len { @@ -175,7 +180,7 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) }; if !is_len_valid { - return Err(E::invalid_length(v.len() - 2, &self)) + return Err(E::invalid_length(v.len() - 2, &self)); } let bytes = match self.len { @@ -193,13 +198,13 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) b'A'..=b'F' => buf |= byte - b'A' + 10, b'a'..=b'f' => buf |= byte - b'a' + 10, b'0'..=b'9' => buf |= byte - b'0', - b' '|b'\r'|b'\n'|b'\t' => { + b' ' | b'\r' | b'\n' | b'\t' => { buf >>= 4; - continue + continue; } b => { let ch = char::from(b); - return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))) + return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))); } } @@ -226,10 +231,10 @@ pub fn deserialize_check_len<'a, 'de, D>(deserializer: D, len: ExpectedLen<'a>) mod tests { extern crate serde_derive; - use self::serde_derive::{Serialize, Deserialize}; + use self::serde_derive::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] - struct Bytes(#[serde(with="super")] Vec); + struct Bytes(#[serde(with = "super")] Vec); #[test] fn should_not_fail_on_short_string() { @@ -248,12 +253,14 @@ mod tests { assert_eq!(f.0, vec![0x1, 0x23, 0x45]); } - #[test] fn should_not_fail_on_other_strings() { - let a: Bytes = serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); - let b: Bytes = 
serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"").unwrap(); - let c: Bytes = serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b4\"").unwrap(); + let a: Bytes = + serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); + let b: Bytes = + serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"").unwrap(); + let c: Bytes = + serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b4\"").unwrap(); assert_eq!(a.0.len(), 31); assert_eq!(b.0.len(), 32); diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md new file mode 100644 index 000000000..10d37b23d --- /dev/null +++ b/rlp/CHANGELOG.md @@ -0,0 +1,15 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.4.3] - 2019-10-24 +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +### Fixed +- Fixed nested unbounded lists (https://github.com/paritytech/parity-common/pull/203) +### Added +- Added no-std support (https://github.com/paritytech/parity-common/pull/206) diff --git a/rlp/benches/rlp.rs b/rlp/benches/rlp.rs index e874cb0da..1fcd8b21f 100644 --- a/rlp/benches/rlp.rs +++ b/rlp/benches/rlp.rs @@ -11,62 +11,75 @@ use criterion::{criterion_group, criterion_main, Criterion}; fn bench_encode(c: &mut Criterion) { - c.bench_function("encode_u64", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new(); - stream.append(&0x1023_4567_89ab_cdefu64); - let _ = stream.out(); - })); - c.bench_function("encode_u256", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new(); - let uint: primitive_types::U256 = "8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0".into(); - stream.append(&uint); - let _ = stream.out(); - })); - c.bench_function("encode_1000_u64", |b| b.iter(|| { - let mut stream 
= rlp::RlpStream::new_list(1000); - for i in 0..1000u64 { - stream.append(&i); - } - let _ = stream.out(); - })); - c.bench_function("encode_nested_empty_lists", |b| b.iter(|| { - // [ [], [[]], [ [], [[]] ] ] - let mut stream = rlp::RlpStream::new_list(3); - stream.begin_list(0); - stream.begin_list(1).begin_list(0); - stream.begin_list(2).begin_list(0).begin_list(1).begin_list(0); - let _ = stream.out(); - })); - c.bench_function("encode_1000_empty_lists", |b| b.iter(|| { - let mut stream = rlp::RlpStream::new_list(1000); - for _ in 0..1000 { + c.bench_function("encode_u64", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new(); + stream.append(&0x1023_4567_89ab_cdefu64); + let _ = stream.out(); + }) + }); + c.bench_function("encode_u256", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new(); + let uint: primitive_types::U256 = "8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0".into(); + stream.append(&uint); + let _ = stream.out(); + }) + }); + c.bench_function("encode_1000_u64", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new_list(1000); + for i in 0..1000u64 { + stream.append(&i); + } + let _ = stream.out(); + }) + }); + c.bench_function("encode_nested_empty_lists", |b| { + b.iter(|| { + // [ [], [[]], [ [], [[]] ] ] + let mut stream = rlp::RlpStream::new_list(3); stream.begin_list(0); - } - let _ = stream.out(); - })); + stream.begin_list(1).begin_list(0); + stream.begin_list(2).begin_list(0).begin_list(1).begin_list(0); + let _ = stream.out(); + }) + }); + c.bench_function("encode_1000_empty_lists", |b| { + b.iter(|| { + let mut stream = rlp::RlpStream::new_list(1000); + for _ in 0..1000 { + stream.begin_list(0); + } + let _ = stream.out(); + }) + }); } fn bench_decode(c: &mut Criterion) { - c.bench_function("decode_u64", |b| b.iter(|| { - let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; - let rlp = rlp::Rlp::new(&data); - let _: u64 = rlp.as_val().unwrap(); - })); - 
c.bench_function("decode_u256", |b| b.iter(|| { - let data = vec![ - 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, - 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0 - ]; - let rlp = rlp::Rlp::new(&data); - let _ : primitive_types::U256 = rlp.as_val().unwrap(); - })); + c.bench_function("decode_u64", |b| { + b.iter(|| { + let data = vec![0x88, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; + let rlp = rlp::Rlp::new(&data); + let _: u64 = rlp.as_val().unwrap(); + }) + }); + c.bench_function("decode_u256", |b| { + b.iter(|| { + let data = vec![ + 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, + ]; + let rlp = rlp::Rlp::new(&data); + let _: primitive_types::U256 = rlp.as_val().unwrap(); + }) + }); c.bench_function("decode_1000_u64", |b| { let mut stream = rlp::RlpStream::new_list(1000); for i in 0..1000u64 { stream.append(&i); } - let data= stream.out(); + let data = stream.out(); b.iter(|| { let rlp = rlp::Rlp::new(&data); for i in 0..1000 { @@ -74,16 +87,18 @@ fn bench_decode(c: &mut Criterion) { } }); }); - c.bench_function("decode_nested_empty_lists", |b| b.iter(|| { - // [ [], [[]], [ [], [[]] ] ] - let data = vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0]; - let rlp = rlp::Rlp::new(&data); - let _v0: Vec = rlp.at(0).unwrap().as_list().unwrap(); - let _v1: Vec = rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); - let nested_rlp = rlp.at(2).unwrap(); - let _v2a: Vec = nested_rlp.at(0).unwrap().as_list().unwrap(); - let _v2b: Vec = nested_rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); - })); + c.bench_function("decode_nested_empty_lists", |b| { + b.iter(|| { + // [ [], [[]], [ [], [[]] ] ] + let data = vec![0xc7, 0xc0, 0xc1, 0xc0, 0xc3, 0xc0, 0xc1, 0xc0]; + let rlp = rlp::Rlp::new(&data); + let 
_v0: Vec = rlp.at(0).unwrap().as_list().unwrap(); + let _v1: Vec = rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); + let nested_rlp = rlp.at(2).unwrap(); + let _v2a: Vec = nested_rlp.at(0).unwrap().as_list().unwrap(); + let _v2b: Vec = nested_rlp.at(1).unwrap().at(0).unwrap().as_list().unwrap(); + }) + }); c.bench_function("decode_1000_empty_lists", |b| { let mut stream = rlp::RlpStream::new_list(1000); for _ in 0..1000 { diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index 3fbf3cf20..4f30b8a59 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -7,14 +7,14 @@ // except according to those terms. #[cfg(not(feature = "std"))] -use alloc::{borrow::ToOwned, vec::Vec, string::String}; +use alloc::{borrow::ToOwned, string::String, vec::Vec}; +use core::iter::{empty, once}; use core::{mem, str}; -use core::iter::{once, empty}; use crate::error::DecoderError; use crate::rlpin::Rlp; use crate::stream::RlpStream; -use crate::traits::{Encodable, Decodable}; +use crate::traits::{Decodable, Encodable}; pub fn decode_usize(bytes: &[u8]) -> Result { match bytes.len() { @@ -41,12 +41,10 @@ impl Encodable for bool { impl Decodable for bool { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - match bytes.len() { - 0 => Ok(false), - 1 => Ok(bytes[0] != 0), - _ => Err(DecoderError::RlpIsTooBig), - } + rlp.decoder().decode_value(|bytes| match bytes.len() { + 0 => Ok(false), + 1 => Ok(bytes[0] != 0), + _ => Err(DecoderError::RlpIsTooBig), }) } } @@ -65,18 +63,19 @@ impl Encodable for Vec { impl Decodable for Vec { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - Ok(bytes.to_vec()) - }) + rlp.decoder().decode_value(|bytes| Ok(bytes.to_vec())) } } -impl Encodable for Option where T: Encodable { +impl Encodable for Option +where + T: Encodable, +{ fn rlp_append(&self, s: &mut RlpStream) { match *self { None => { s.begin_list(0); - }, + } Some(ref value) => { s.begin_list(1); s.append(value); @@ -85,7 +84,10 @@ impl Encodable for 
Option where T: Encodable { } } -impl Decodable for Option where T: Decodable { +impl Decodable for Option +where + T: Decodable, +{ fn decode(rlp: &Rlp) -> Result { let items = rlp.item_count()?; match items { @@ -108,13 +110,11 @@ impl Encodable for u8 { impl Decodable for u8 { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - match bytes.len() { - 1 if bytes[0] != 0 => Ok(bytes[0]), - 0 => Ok(0), - 1 => Err(DecoderError::RlpInvalidIndirection), - _ => Err(DecoderError::RlpIsTooBig), - } + rlp.decoder().decode_value(|bytes| match bytes.len() { + 1 if bytes[0] != 0 => Ok(bytes[0]), + 0 => Ok(0), + 1 => Err(DecoderError::RlpInvalidIndirection), + _ => Err(DecoderError::RlpIsTooBig), }) } } @@ -128,33 +128,31 @@ macro_rules! impl_encodable_for_u { s.encoder().encode_value(&buffer[leading_empty_bytes..]); } } - } + }; } macro_rules! impl_decodable_for_u { ($name: ident) => { impl Decodable for $name { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| { - match bytes.len() { - 0 | 1 => u8::decode(rlp).map(|v| v as $name), - l if l <= mem::size_of::<$name>() => { - if bytes[0] == 0 { - return Err(DecoderError::RlpInvalidIndirection); - } - let mut res = 0 as $name; - for (i, byte) in bytes.iter().enumerate().take(l) { - let shift = (l - 1 - i) * 8; - res += (*byte as $name) << shift; - } - Ok(res) + rlp.decoder().decode_value(|bytes| match bytes.len() { + 0 | 1 => u8::decode(rlp).map(|v| v as $name), + l if l <= mem::size_of::<$name>() => { + if bytes[0] == 0 { + return Err(DecoderError::RlpInvalidIndirection); } - _ => Err(DecoderError::RlpIsTooBig), + let mut res = 0 as $name; + for (i, byte) in bytes.iter().enumerate().take(l) { + let shift = (l - 1 - i) * 8; + res += (*byte as $name) << shift; + } + Ok(res) } + _ => Err(DecoderError::RlpIsTooBig), }) } } - } + }; } impl_encodable_for_u!(u16); diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index 1baa52c2f..ab386e689 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -37,18 
+37,18 @@ #[cfg(not(feature = "std"))] extern crate alloc; -mod traits; mod error; +mod impls; mod rlpin; mod stream; -mod impls; +mod traits; #[cfg(not(feature = "std"))] use alloc::vec::Vec; use core::borrow::Borrow; pub use self::error::DecoderError; -pub use self::rlpin::{Rlp, RlpIterator, PayloadInfo, Prototype}; +pub use self::rlpin::{PayloadInfo, Prototype, Rlp, RlpIterator}; pub use self::stream::RlpStream; pub use self::traits::{Decodable, Encodable}; @@ -68,12 +68,18 @@ pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1]; /// assert_eq!(animal, "cat".to_owned()); /// } /// ``` -pub fn decode(bytes: &[u8]) -> Result where T: Decodable { +pub fn decode(bytes: &[u8]) -> Result +where + T: Decodable, +{ let rlp = Rlp::new(bytes); rlp.as_val() } -pub fn decode_list(bytes: &[u8]) -> Vec where T: Decodable { +pub fn decode_list(bytes: &[u8]) -> Vec +where + T: Decodable, +{ let rlp = Rlp::new(bytes); rlp.as_list().expect("trusted rlp should be valid") } @@ -89,13 +95,20 @@ pub fn decode_list(bytes: &[u8]) -> Vec where T: Decodable { /// assert_eq!(out, vec![0x83, b'c', b'a', b't']); /// } /// ``` -pub fn encode(object: &E) -> Vec where E: Encodable { +pub fn encode(object: &E) -> Vec +where + E: Encodable, +{ let mut stream = RlpStream::new(); stream.append(object); stream.drain() } -pub fn encode_list(object: &[K]) -> Vec where E: Encodable, K: Borrow { +pub fn encode_list(object: &[K]) -> Vec +where + E: Encodable, + K: Borrow, +{ let mut stream = RlpStream::new(); stream.append_list(object); stream.drain() diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 0d8c9ad2a..dd2ee9826 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -73,7 +73,9 @@ impl PayloadInfo { } /// Total size of the RLP. - pub fn total(&self) -> usize { self.header_len + self.value_len } + pub fn total(&self) -> usize { + self.header_len + self.value_len + } /// Create a new object from the given bytes RLP. 
The bytes pub fn from(header_bytes: &[u8]) -> Result { @@ -114,27 +116,26 @@ impl<'a> fmt::Display for Rlp<'a> { Ok(Prototype::Data(_)) => write!(f, "\"0x{}\"", self.data().unwrap().to_hex::()), Ok(Prototype::List(len)) => { write!(f, "[")?; - for i in 0..len-1 { + for i in 0..len - 1 { write!(f, "{}, ", self.at(i).unwrap())?; } write!(f, "{}", self.at(len - 1).unwrap())?; write!(f, "]") - }, - Err(err) => write!(f, "{:?}", err) + } + Err(err) => write!(f, "{:?}", err), } } } impl<'a> Rlp<'a> { pub fn new(bytes: &'a [u8]) -> Rlp<'a> { - Rlp { - bytes, - offset_cache: Cell::new(None), - count_cache: Cell::new(None) - } + Rlp { bytes, offset_cache: Cell::new(None), count_cache: Cell::new(None) } } - pub fn as_raw<'view>(&'view self) -> &'a [u8] where 'a: 'view { + pub fn as_raw<'view>(&'view self) -> &'a [u8] + where + 'a: 'view, + { self.bytes } @@ -153,7 +154,10 @@ impl<'a> Rlp<'a> { BasicDecoder::payload_info(self.bytes) } - pub fn data<'view>(&'view self) -> Result<&'a [u8], DecoderError> where 'a: 'view { + pub fn data<'view>(&'view self) -> Result<&'a [u8], DecoderError> + where + 'a: 'view, + { let pi = BasicDecoder::payload_info(self.bytes)?; Ok(&self.bytes[pi.header_len..(pi.header_len + pi.value_len)]) } @@ -169,7 +173,7 @@ impl<'a> Rlp<'a> { } } } else { - Err(DecoderError::RlpExpectedToBeList) + Err(DecoderError::RlpExpectedToBeList) } } @@ -182,7 +186,10 @@ impl<'a> Rlp<'a> { } } - pub fn at<'view>(&'view self, index: usize) -> Result, DecoderError> where 'a: 'view { + pub fn at<'view>(&'view self, index: usize) -> Result, DecoderError> + where + 'a: 'view, + { if !self.is_list() { return Err(DecoderError::RlpExpectedToBeList); } @@ -191,9 +198,9 @@ impl<'a> Rlp<'a> { // current search index, otherwise move to beginning of list let cache = self.offset_cache.get(); let (bytes, indexes_to_skip, bytes_consumed) = match cache { - Some(ref cache) if cache.index <= index => ( - Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset - ), + 
Some(ref cache) if cache.index <= index => { + (Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset) + } _ => { let (bytes, consumed) = self.consume_list_payload()?; (bytes, index, consumed) @@ -238,28 +245,43 @@ impl<'a> Rlp<'a> { b @ 0xb8..=0xbf => { let payload_idx = 1 + b as usize - 0xb7; payload_idx < self.bytes.len() && self.bytes[payload_idx] != 0 - }, - _ => false + } + _ => false, } } - pub fn iter<'view>(&'view self) -> RlpIterator<'a, 'view> where 'a: 'view { + pub fn iter<'view>(&'view self) -> RlpIterator<'a, 'view> + where + 'a: 'view, + { self.into_iter() } - pub fn as_val(&self) -> Result where T: Decodable { + pub fn as_val(&self) -> Result + where + T: Decodable, + { T::decode(self) } - pub fn as_list(&self) -> Result, DecoderError> where T: Decodable { + pub fn as_list(&self) -> Result, DecoderError> + where + T: Decodable, + { self.iter().map(|rlp| rlp.as_val()).collect() } - pub fn val_at(&self, index: usize) -> Result where T: Decodable { + pub fn val_at(&self, index: usize) -> Result + where + T: Decodable, + { self.at(index)?.as_val() } - pub fn list_at(&self, index: usize) -> Result, DecoderError> where T: Decodable { + pub fn list_at(&self, index: usize) -> Result, DecoderError> + where + T: Decodable, + { self.at(index)?.as_list() } @@ -300,20 +322,23 @@ impl<'a> Rlp<'a> { } /// Iterator over rlp-slice list elements. 
-pub struct RlpIterator<'a, 'view> where 'a: 'view { +pub struct RlpIterator<'a, 'view> +where + 'a: 'view, +{ rlp: &'view Rlp<'a>, index: usize, } -impl<'a, 'view> IntoIterator for &'view Rlp<'a> where 'a: 'view { +impl<'a, 'view> IntoIterator for &'view Rlp<'a> +where + 'a: 'view, +{ type Item = Rlp<'a>; type IntoIter = RlpIterator<'a, 'view>; fn into_iter(self) -> Self::IntoIter { - RlpIterator { - rlp: self, - index: 0, - } + RlpIterator { rlp: self, index: 0 } } } @@ -334,9 +359,7 @@ pub struct BasicDecoder<'a> { impl<'a> BasicDecoder<'a> { pub fn new(rlp: &'a [u8]) -> BasicDecoder<'a> { - BasicDecoder { - rlp, - } + BasicDecoder { rlp } } /// Return first item info. @@ -349,8 +372,9 @@ impl<'a> BasicDecoder<'a> { } pub fn decode_value(&self, f: F) -> Result - where F: Fn(&[u8]) -> Result { - + where + F: Fn(&[u8]) -> Result, + { let bytes = self.rlp; let l = *bytes.first().ok_or_else(|| DecoderError::RlpIsTooShort)?; @@ -375,8 +399,7 @@ impl<'a> BasicDecoder<'a> { } let len = decode_usize(&bytes[1..begin_of_value])?; - let last_index_of_value = begin_of_value.checked_add(len) - .ok_or(DecoderError::RlpInvalidLength)?; + let last_index_of_value = begin_of_value.checked_add(len).ok_or(DecoderError::RlpInvalidLength)?; if bytes.len() < last_index_of_value { return Err(DecoderError::RlpInconsistentLengthAndData); } diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 1a4fb0d66..581f5efbe 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -21,11 +21,7 @@ struct ListInfo { impl ListInfo { fn new(position: usize, max: Option) -> ListInfo { - ListInfo { - position, - current: 0, - max, - } + ListInfo { position, current: 0, max } } } @@ -45,11 +41,7 @@ impl Default for RlpStream { impl RlpStream { /// Initializes instance of empty `Stream`. 
pub fn new() -> Self { - RlpStream { - unfinished_lists: Vec::with_capacity(16), - buffer: Vec::with_capacity(1024), - finished_list: false, - } + RlpStream { unfinished_lists: Vec::with_capacity(16), buffer: Vec::with_capacity(1024), finished_list: false } } /// Initializes the `Stream` as a list. @@ -113,7 +105,10 @@ impl RlpStream { /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); /// } /// ``` - pub fn append(&mut self, value: &E) -> &mut Self where E: Encodable { + pub fn append(&mut self, value: &E) -> &mut Self + where + E: Encodable, + { self.finished_list = false; value.rlp_append(self); if !self.finished_list { @@ -136,7 +131,8 @@ impl RlpStream { /// } /// ``` pub fn append_iter(&mut self, value: I) -> &mut Self - where I: IntoIterator, + where + I: IntoIterator, { self.finished_list = false; self.encoder().encode_iter(value); @@ -147,7 +143,11 @@ impl RlpStream { } /// Appends list of values to the end of stream, chainable. - pub fn append_list(&mut self, values: &[K]) -> &mut Self where E: Encodable, K: Borrow { + pub fn append_list(&mut self, values: &[K]) -> &mut Self + where + E: Encodable, + K: Borrow, + { self.begin_list(values.len()); for value in values { self.append(value.borrow()); @@ -157,7 +157,10 @@ impl RlpStream { /// Appends value to the end of stream, but do not count it as an appended item. 
/// It's useful for wrapper types - pub fn append_internal(&mut self, value: &E) -> &mut Self where E: Encodable { + pub fn append_internal(&mut self, value: &E) -> &mut Self + where + E: Encodable, + { value.rlp_append(self); self } @@ -184,7 +187,7 @@ impl RlpStream { self.buffer.push(0xc0u8); self.note_appended(1); self.finished_list = true; - }, + } _ => { // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data @@ -192,7 +195,7 @@ impl RlpStream { let position = self.buffer.len(); self.unfinished_lists.push(ListInfo::new(position, Some(len))); - }, + } } // return chainable self @@ -350,7 +353,6 @@ impl RlpStream { pub fn complete_unbounded_list(&mut self) { self.finalize_unbounded_list(); } - } pub struct BasicEncoder<'a> { @@ -359,9 +361,7 @@ pub struct BasicEncoder<'a> { impl<'a> BasicEncoder<'a> { fn new(stream: &'a mut RlpStream) -> Self { - BasicEncoder { - buffer: &mut stream.buffer - } + BasicEncoder { buffer: &mut stream.buffer } } fn insert_size(&mut self, size: usize, position: usize) -> u8 { @@ -382,7 +382,7 @@ impl<'a> BasicEncoder<'a> { match len { 0..=55 => { self.buffer[pos - 1] = 0xc0u8 + len as u8; - }, + } _ => { let inserted_bytes = self.insert_size(len, pos); self.buffer[pos - 1] = 0xf7u8 + inserted_bytes; @@ -396,7 +396,8 @@ impl<'a> BasicEncoder<'a> { /// Pushes encoded value to the end of buffer pub fn encode_iter(&mut self, value: I) - where I: IntoIterator, + where + I: IntoIterator, { let mut value = value.into_iter(); let len = match value.size_hint() { diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 5d0310553..84a090e09 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -6,11 +6,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use core::{fmt, cmp}; +use core::{cmp, fmt}; use hex_literal::hex; use primitive_types::{H160, U256}; -use rlp::{Encodable, Decodable, Rlp, RlpStream, DecoderError}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; #[test] fn test_rlp_display() { @@ -92,10 +92,13 @@ fn rlp_iter() { } } -struct ETestPair(T, Vec) where T: Encodable; +struct ETestPair(T, Vec) +where + T: Encodable; fn run_encode_tests(tests: Vec>) - where T: Encodable +where + T: Encodable, { for t in &tests { let res = rlp::encode(&t.0); @@ -103,10 +106,13 @@ fn run_encode_tests(tests: Vec>) } } -struct VETestPair(Vec, Vec) where T: Encodable; +struct VETestPair(Vec, Vec) +where + T: Encodable; fn run_encode_tests_list(tests: Vec>) - where T: Encodable +where + T: Encodable, { for t in &tests { let res = rlp::encode_list(&t.0); @@ -146,44 +152,52 @@ fn encode_u64() { #[test] fn encode_u256() { - let tests = vec![ETestPair(U256::from(0u64), vec![0x80u8]), - ETestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), - ETestPair(U256::from(0xffff_ffffu64), - vec![0x84, 0xff, 0xff, 0xff, 0xff]), - ETestPair(("8090a0b0c0d0e0f00910203040506077000000000000\ - 000100000000000012f0").into(), - vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, - 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0])]; + let tests = vec![ + ETestPair(U256::from(0u64), vec![0x80u8]), + ETestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), + ETestPair(U256::from(0xffff_ffffu64), vec![0x84, 0xff, 0xff, 0xff, 0xff]), + ETestPair( + ("8090a0b0c0d0e0f00910203040506077000000000000\ + 000100000000000012f0") + .into(), + vec![ + 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, + ], + ), + ]; run_encode_tests(tests); } #[test] fn 
encode_str() { - let tests = vec![ETestPair("cat", vec![0x83, b'c', b'a', b't']), - ETestPair("dog", vec![0x83, b'd', b'o', b'g']), - ETestPair("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k']), - ETestPair("", vec![0x80]), - ETestPair("Lorem ipsum dolor sit amet, consectetur adipisicing elit", - vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', - b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o', - b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', - b't', b',', b' ', b'c', b'o', b'n', b's', b'e', b'c', - b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', - b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ', - b'e', b'l', b'i', b't'])]; + let tests = vec![ + ETestPair("cat", vec![0x83, b'c', b'a', b't']), + ETestPair("dog", vec![0x83, b'd', b'o', b'g']), + ETestPair("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k']), + ETestPair("", vec![0x80]), + ETestPair( + "Lorem ipsum dolor sit amet, consectetur adipisicing elit", + vec![ + 0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', + b'o', b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', b't', b',', b' ', b'c', b'o', b'n', b's', + b'e', b'c', b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', b'p', b'i', b's', b'i', b'c', b'i', + b'n', b'g', b' ', b'e', b'l', b'i', b't', + ], + ), + ]; run_encode_tests(tests); } #[test] fn encode_address() { - let tests = vec![ - ETestPair(H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), - vec![0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, - 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, - 0xb3, 0x7d, 0x11, 0x06]) - ]; + let tests = vec![ETestPair( + H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), + vec![ + 0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, 0xb3, + 0x7d, 0x11, 0x06, + ], + )]; run_encode_tests(tests); } @@ -205,32 +219,44 @@ fn encode_vector_u64() { VETestPair(vec![], vec![0xc0]), 
VETestPair(vec![15u64], vec![0xc1, 0x0f]), VETestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]), - VETestPair(vec![0xffff_ffff, 1, 2, 3, 7, 0xff], vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff]), + VETestPair( + vec![0xffff_ffff, 1, 2, 3, 7, 0xff], + vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff], + ), ]; run_encode_tests_list(tests); } #[test] fn encode_vector_str() { - let tests = vec![VETestPair(vec!["cat", "dog"], - vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; + let tests = vec![VETestPair(vec!["cat", "dog"], vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; run_encode_tests_list(tests); } -struct DTestPair(T, Vec) where T: Decodable + fmt::Debug + cmp::Eq; +struct DTestPair(T, Vec) +where + T: Decodable + fmt::Debug + cmp::Eq; -struct VDTestPair(Vec, Vec) where T: Decodable + fmt::Debug + cmp::Eq; +struct VDTestPair(Vec, Vec) +where + T: Decodable + fmt::Debug + cmp::Eq; -fn run_decode_tests(tests: Vec>) where T: Decodable + fmt::Debug + cmp::Eq { +fn run_decode_tests(tests: Vec>) +where + T: Decodable + fmt::Debug + cmp::Eq, +{ for t in &tests { - let res : Result = rlp::decode(&t.1); + let res: Result = rlp::decode(&t.1); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&res, &t.0); } } -fn run_decode_tests_list(tests: Vec>) where T: Decodable + fmt::Debug + cmp::Eq { +fn run_decode_tests_list(tests: Vec>) +where + T: Decodable + fmt::Debug + cmp::Eq, +{ for t in &tests { let res: Vec = rlp::decode_list(&t.1); assert_eq!(res, t.0); @@ -251,20 +277,13 @@ fn decode_vector_u8() { #[test] fn decode_untrusted_u8() { - let tests = vec![ - DTestPair(0x0u8, vec![0x80]), - DTestPair(0x77u8, vec![0x77]), - DTestPair(0xccu8, vec![0x81, 0xcc]), - ]; + let tests = vec![DTestPair(0x0u8, vec![0x80]), DTestPair(0x77u8, vec![0x77]), DTestPair(0xccu8, vec![0x81, 0xcc])]; run_decode_tests(tests); } #[test] fn decode_untrusted_u16() { - let tests = vec![ - DTestPair(0x100u16, vec![0x82, 
0x01, 0x00]), - DTestPair(0xffffu16, vec![0x82, 0xff, 0xff]), - ]; + let tests = vec![DTestPair(0x100u16, vec![0x82, 0x01, 0x00]), DTestPair(0xffffu16, vec![0x82, 0xff, 0xff])]; run_decode_tests(tests); } @@ -288,46 +307,52 @@ fn decode_untrusted_u64() { #[test] fn decode_untrusted_u256() { - let tests = vec![DTestPair(U256::from(0u64), vec![0x80u8]), - DTestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), - DTestPair(U256::from(0xffff_ffffu64), - vec![0x84, 0xff, 0xff, 0xff, 0xff]), - DTestPair(("8090a0b0c0d0e0f00910203040506077000000000000\ - 000100000000000012f0").into(), - vec![0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, - 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0])]; + let tests = vec![ + DTestPair(U256::from(0u64), vec![0x80u8]), + DTestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), + DTestPair(U256::from(0xffff_ffffu64), vec![0x84, 0xff, 0xff, 0xff, 0xff]), + DTestPair( + ("8090a0b0c0d0e0f00910203040506077000000000000\ + 000100000000000012f0") + .into(), + vec![ + 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, + ], + ), + ]; run_decode_tests(tests); } #[test] fn decode_untrusted_str() { - let tests = vec![DTestPair("cat".to_owned(), vec![0x83, b'c', b'a', b't']), - DTestPair("dog".to_owned(), vec![0x83, b'd', b'o', b'g']), - DTestPair("Marek".to_owned(), - vec![0x85, b'M', b'a', b'r', b'e', b'k']), - DTestPair("".to_owned(), vec![0x80]), - DTestPair("Lorem ipsum dolor sit amet, consectetur adipisicing elit" - .to_owned(), - vec![0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', - b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', b'o', - b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', - b't', b',', b' ', b'c', b'o', b'n', b's', b'e', b'c', - 
b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', - b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ', - b'e', b'l', b'i', b't'])]; + let tests = vec![ + DTestPair("cat".to_owned(), vec![0x83, b'c', b'a', b't']), + DTestPair("dog".to_owned(), vec![0x83, b'd', b'o', b'g']), + DTestPair("Marek".to_owned(), vec![0x85, b'M', b'a', b'r', b'e', b'k']), + DTestPair("".to_owned(), vec![0x80]), + DTestPair( + "Lorem ipsum dolor sit amet, consectetur adipisicing elit".to_owned(), + vec![ + 0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', + b'o', b'r', b' ', b's', b'i', b't', b' ', b'a', b'm', b'e', b't', b',', b' ', b'c', b'o', b'n', b's', + b'e', b'c', b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', b'p', b'i', b's', b'i', b'c', b'i', + b'n', b'g', b' ', b'e', b'l', b'i', b't', + ], + ), + ]; run_decode_tests(tests); } #[test] fn decode_untrusted_address() { - let tests = vec![ - DTestPair(H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), - vec![0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, - 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, - 0xb3, 0x7d, 0x11, 0x06]) - ]; + let tests = vec![DTestPair( + H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), + vec![ + 0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, 0xb3, + 0x7d, 0x11, 0x06, + ], + )]; run_decode_tests(tests); } @@ -337,21 +362,25 @@ fn decode_untrusted_vector_u64() { VDTestPair(vec![], vec![0xc0]), VDTestPair(vec![15u64], vec![0xc1, 0x0f]), VDTestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]), - VDTestPair(vec![0xffff_ffff, 1, 2, 3, 7, 0xff], vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff]), + VDTestPair( + vec![0xffff_ffff, 1, 2, 3, 7, 0xff], + vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff], + ), ]; run_decode_tests_list(tests); } #[test] fn decode_untrusted_vector_str() { - let tests = 
vec![VDTestPair(vec!["cat".to_owned(), "dog".to_owned()], - vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'])]; + let tests = vec![VDTestPair( + vec!["cat".to_owned(), "dog".to_owned()], + vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g'], + )]; run_decode_tests_list(tests); } #[test] -fn test_rlp_data_length_check() -{ +fn test_rlp_data_length_check() { let data = vec![0x84, b'c', b'a', b't']; let rlp = Rlp::new(&data); @@ -360,8 +389,7 @@ fn test_rlp_data_length_check() } #[test] -fn test_rlp_long_data_length_check() -{ +fn test_rlp_long_data_length_check() { let mut data: Vec = vec![0xb8, 255]; for _ in 0..253 { data.push(b'c'); @@ -374,8 +402,7 @@ fn test_rlp_long_data_length_check() } #[test] -fn test_the_exact_long_string() -{ +fn test_the_exact_long_string() { let mut data: Vec = vec![0xb8, 255]; for _ in 0..255 { data.push(b'c'); @@ -388,8 +415,7 @@ fn test_the_exact_long_string() } #[test] -fn test_rlp_2bytes_data_length_check() -{ +fn test_rlp_2bytes_data_length_check() { let mut data: Vec = vec![0xb9, 2, 255]; // 512+255 for _ in 0..700 { data.push(b'c'); @@ -419,7 +445,7 @@ fn test_rlp_list_length_overflow() { #[test] fn test_rlp_stream_size_limit() { - for limit in 40 .. 
270 { + for limit in 40..270 { let item = [0u8; 1]; let mut stream = RlpStream::new(); while stream.append_raw_checked(&item, 1, limit) {} @@ -501,10 +527,7 @@ fn test_nested_list_roundtrip() { impl Encodable for Inner { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_unbounded_list() - .append(&self.0) - .append(&self.1) - .finalize_unbounded_list(); + s.begin_unbounded_list().append(&self.0).append(&self.1).finalize_unbounded_list(); } } @@ -519,9 +542,7 @@ fn test_nested_list_roundtrip() { impl Encodable for Nest { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_unbounded_list() - .append_list(&self.0) - .finalize_unbounded_list(); + s.begin_unbounded_list().append_list(&self.0).finalize_unbounded_list(); } } @@ -531,7 +552,6 @@ fn test_nested_list_roundtrip() { } } - let items = (0..4).map(|i| Inner(i, i + 1)).collect(); let nest = Nest(items); diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..cba0d885c --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +hard_tabs = true +max_width = 120 +use_small_heuristics = "Max" diff --git a/trace-time/CHANGELOG.md b/trace-time/CHANGELOG.md new file mode 100644 index 000000000..0231c53a1 --- /dev/null +++ b/trace-time/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.2] - 2019-10-24 +- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/232) +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/trace-time/src/lib.rs b/trace-time/src/lib.rs index 7a832656e..65769ee81 100644 --- a/trace-time/src/lib.rs +++ b/trace-time/src/lib.rs @@ -36,10 +36,7 @@ pub struct PerfTimer { impl PerfTimer { /// Create an instance with given name. 
pub fn new(name: &'static str) -> PerfTimer { - PerfTimer { - name, - start: Instant::now(), - } + PerfTimer { name, start: Instant::now() } } } diff --git a/transaction-pool/CHANGELOG.md b/transaction-pool/CHANGELOG.md new file mode 100644 index 000000000..bcd99d5a9 --- /dev/null +++ b/transaction-pool/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [2.0.2] - 2019-10-24 +- Updated to 2018 edition idioms (https://github.com/paritytech/parity-common/pull/237) +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/transaction-pool/src/error.rs b/transaction-pool/src/error.rs index 74ce76652..348082f6a 100644 --- a/transaction-pool/src/error.rs +++ b/transaction-pool/src/error.rs @@ -33,12 +33,11 @@ pub type Result = result::Result>; impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - Error::AlreadyImported(h) => - write!(f, "[{:?}] already imported", h), - Error::TooCheapToEnter(hash, min_score) => - write!(f, "[{:x}] too cheap to enter the pool. Min score: {}", hash, min_score), - Error::TooCheapToReplace(old_hash, hash) => - write!(f, "[{:x}] too cheap to replace: {:x}", hash, old_hash), + Error::AlreadyImported(h) => write!(f, "[{:?}] already imported", h), + Error::TooCheapToEnter(hash, min_score) => { + write!(f, "[{:x}] too cheap to enter the pool. 
Min score: {}", hash, min_score) + } + Error::TooCheapToReplace(old_hash, hash) => write!(f, "[{:x}] too cheap to replace: {:x}", hash, old_hash), } } } @@ -46,14 +45,19 @@ impl fmt::Display for Error { impl error::Error for Error {} #[cfg(test)] -impl PartialEq for Error where H: PartialEq { +impl PartialEq for Error +where + H: PartialEq, +{ fn eq(&self, other: &Self) -> bool { use self::Error::*; match (self, other) { (&AlreadyImported(ref h1), &AlreadyImported(ref h2)) => h1 == h2, (&TooCheapToEnter(ref h1, ref s1), &TooCheapToEnter(ref h2, ref s2)) => h1 == h2 && s1 == s2, - (&TooCheapToReplace(ref old1, ref new1), &TooCheapToReplace(ref old2, ref new2)) => old1 == old2 && new1 == new2, + (&TooCheapToReplace(ref old1, ref new1), &TooCheapToReplace(ref old2, ref new2)) => { + old1 == old2 && new1 == new2 + } _ => false, } } diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs index 669441829..66e93fffe 100644 --- a/transaction-pool/src/lib.rs +++ b/transaction-pool/src/lib.rs @@ -86,9 +86,9 @@ pub mod scoring; pub use self::error::Error; pub use self::listener::{Listener, NoopListener}; pub use self::options::Options; -pub use self::pool::{Pool, PendingIterator, UnorderedIterator, Transaction}; -pub use self::ready::{Ready, Readiness}; -pub use self::replace::{ShouldReplace, ReplaceTransaction}; +pub use self::pool::{PendingIterator, Pool, Transaction, UnorderedIterator}; +pub use self::ready::{Readiness, Ready}; +pub use self::replace::{ReplaceTransaction, ShouldReplace}; pub use self::scoring::Scoring; pub use self::status::{LightStatus, Status}; pub use self::verifier::Verifier; diff --git a/transaction-pool/src/listener.rs b/transaction-pool/src/listener.rs index a599c8763..566b318ee 100644 --- a/transaction-pool/src/listener.rs +++ b/transaction-pool/src/listener.rs @@ -14,8 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::{fmt::{Debug, LowerHex}, sync::Arc}; use crate::error::Error; +use std::{ + fmt::{Debug, LowerHex}, + sync::Arc, +}; /// Transaction pool listener. /// @@ -49,7 +52,8 @@ pub trait Listener { pub struct NoopListener; impl Listener for NoopListener {} -impl Listener for (A, B) where +impl Listener for (A, B) +where A: Listener, B: Listener, { diff --git a/transaction-pool/src/options.rs b/transaction-pool/src/options.rs index 291001a20..8e1c1002d 100644 --- a/transaction-pool/src/options.rs +++ b/transaction-pool/src/options.rs @@ -27,10 +27,6 @@ pub struct Options { impl Default for Options { fn default() -> Self { - Options { - max_count: 1024, - max_per_sender: 16, - max_mem_usage: 8 * 1024 * 1024, - } + Options { max_count: 1024, max_per_sender: 16, max_mem_usage: 8 * 1024 * 1024 } } } diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs index 539e7dded..63bb0a07f 100644 --- a/transaction-pool/src/pool.rs +++ b/transaction-pool/src/pool.rs @@ -14,18 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::sync::Arc; -use std::slice; -use std::collections::{hash_map, HashMap, BTreeSet}; use log::{trace, warn}; +use std::collections::{hash_map, BTreeSet, HashMap}; +use std::slice; +use std::sync::Arc; use crate::{ error, listener::{Listener, NoopListener}, options::Options, - ready::{Ready, Readiness}, - replace::{ShouldReplace, ReplaceTransaction}, - scoring::{self, Scoring, ScoreWithRef}, + ready::{Readiness, Ready}, + replace::{ReplaceTransaction, ShouldReplace}, + scoring::{self, ScoreWithRef, Scoring}, status::{LightStatus, Status}, transactions::{AddResult, Transactions}, VerifiedTransaction, @@ -46,10 +46,7 @@ pub struct Transaction { impl Clone for Transaction { fn clone(&self) -> Self { - Transaction { - insertion_id: self.insertion_id, - transaction: self.transaction.clone(), - } + Transaction { insertion_id: self.insertion_id, transaction: self.transaction.clone() } } } @@ -101,7 +98,8 @@ impl> Pool { const INITIAL_NUMBER_OF_SENDERS: usize = 16; -impl Pool where +impl Pool +where T: VerifiedTransaction, S: Scoring, L: Listener, @@ -122,7 +120,6 @@ impl Pool where worst_transactions: Default::default(), insertion_id: 0, } - } /// Attempts to import new transaction to the pool, returns a `Arc` or an `Error`. @@ -142,30 +139,25 @@ impl Pool where let mem_usage = transaction.mem_usage(); if self.by_hash.contains_key(transaction.hash()) { - return Err(error::Error::AlreadyImported(transaction.hash().clone())) + return Err(error::Error::AlreadyImported(transaction.hash().clone())); } self.insertion_id += 1; - let transaction = Transaction { - insertion_id: self.insertion_id, - transaction: Arc::new(transaction), - }; + let transaction = Transaction { insertion_id: self.insertion_id, transaction: Arc::new(transaction) }; // TODO [ToDr] Most likely move this after the transaction is inserted. // Avoid using should_replace, but rather use scoring for that. 
{ - let remove_worst = |s: &mut Self, transaction| { - match s.remove_worst(transaction, replace) { - Err(err) => { - s.listener.rejected(transaction, &err); - Err(err) - }, - Ok(None) => Ok(false), - Ok(Some(removed)) => { - s.listener.dropped(&removed, Some(transaction)); - s.finalize_remove(removed.hash()); - Ok(true) - }, + let remove_worst = |s: &mut Self, transaction| match s.remove_worst(transaction, replace) { + Err(err) => { + s.listener.rejected(transaction, &err); + Err(err) + } + Ok(None) => Ok(false), + Ok(Some(removed)) => { + s.listener.dropped(&removed, Some(transaction)); + s.finalize_remove(removed.hash()); + Ok(true) } }; @@ -185,7 +177,8 @@ impl Pool where } let (result, prev_state, current_state) = { - let transactions = self.transactions.entry(transaction.sender().clone()).or_insert_with(Transactions::default); + let transactions = + self.transactions.entry(transaction.sender().clone()).or_insert_with(Transactions::default); // get worst and best transactions for comparison let prev = transactions.worst_and_best(); let result = transactions.add(transaction, &self.scoring, self.options.max_per_sender); @@ -201,22 +194,21 @@ impl Pool where self.listener.added(&tx, None); self.finalize_insert(&tx, None); Ok(tx.transaction) - }, - AddResult::PushedOut { new, old } | - AddResult::Replaced { new, old } => { + } + AddResult::PushedOut { new, old } | AddResult::Replaced { new, old } => { self.listener.added(&new, Some(&old)); self.finalize_insert(&new, Some(&old)); Ok(new.transaction) - }, + } AddResult::TooCheap { new, old } => { let error = error::Error::TooCheapToReplace(old.hash().clone(), new.hash().clone()); self.listener.rejected(&new, &error); - return Err(error) - }, + return Err(error); + } AddResult::TooCheapToEnter(new, score) => { let error = error::Error::TooCheapToEnter(new.hash().clone(), format!("{:#x}", score)); self.listener.rejected(&new, &error); - return Err(error) + return Err(error); } } } @@ -248,28 +240,29 @@ impl Pool where 
let worst_collection = &mut self.worst_transactions; let best_collection = &mut self.best_transactions; - let is_same = |a: &(S::Score, Transaction), b: &(S::Score, Transaction)| { - a.0 == b.0 && a.1.hash() == b.1.hash() - }; + let is_same = + |a: &(S::Score, Transaction), b: &(S::Score, Transaction)| a.0 == b.0 && a.1.hash() == b.1.hash(); - let update = |collection: &mut BTreeSet<_>, (score, tx), remove| if remove { - collection.remove(&ScoreWithRef::new(score, tx)); - } else { - collection.insert(ScoreWithRef::new(score, tx)); + let update = |collection: &mut BTreeSet<_>, (score, tx), remove| { + if remove { + collection.remove(&ScoreWithRef::new(score, tx)); + } else { + collection.insert(ScoreWithRef::new(score, tx)); + } }; match (previous, current) { (None, Some((worst, best))) => { update(worst_collection, worst, false); update(best_collection, best, false); - }, + } (Some((worst, best)), None) => { // all transactions from that sender has been removed. // We can clear a hashmap entry. self.transactions.remove(worst.1.sender()); update(worst_collection, worst, true); update(best_collection, best, true); - }, + } (Some((w1, b1)), Some((w2, b2))) => { if !is_same(&w1, &w2) { update(worst_collection, w1, true); @@ -279,8 +272,8 @@ impl Pool where update(best_collection, b1, true); update(best_collection, b2, false); } - }, - (None, None) => {}, + } + (None, None) => {} } } @@ -288,13 +281,17 @@ impl Pool where /// /// Returns `None` in case we couldn't decide if the transaction should replace the worst transaction or not. /// In such case we will accept the transaction even though it is going to exceed the limit. - fn remove_worst(&mut self, transaction: &Transaction, replace: &dyn ShouldReplace) -> error::Result>, T::Hash> { + fn remove_worst( + &mut self, + transaction: &Transaction, + replace: &dyn ShouldReplace, + ) -> error::Result>, T::Hash> { let to_remove = match self.worst_transactions.iter().next_back() { // No elements to remove? 
and the pool is still full? None => { warn!("The pool is full but there are no transactions to remove."); - return Err(error::Error::TooCheapToEnter(transaction.hash().clone(), "unknown".into())) - }, + return Err(error::Error::TooCheapToEnter(transaction.hash().clone(), "unknown".into())); + } Some(old) => { let txs = &self.transactions; let get_replace_tx = |tx| { @@ -311,10 +308,13 @@ impl Pool where scoring::Choice::ReplaceOld => Some(old.clone()), // otherwise fail scoring::Choice::RejectNew => { - return Err(error::Error::TooCheapToEnter(transaction.hash().clone(), format!("{:#x}", old.score))) - }, + return Err(error::Error::TooCheapToEnter( + transaction.hash().clone(), + format!("{:#x}", old.score), + )) + } } - }, + } }; if let Some(to_remove) = to_remove { @@ -330,7 +330,11 @@ impl Pool where } /// Removes transaction from sender's transaction `HashMap`. - fn remove_from_set, &S) -> R>(&mut self, sender: &T::Sender, f: F) -> Option { + fn remove_from_set, &S) -> R>( + &mut self, + sender: &T::Sender, + f: F, + ) -> Option { let (prev, next, result) = if let Some(set) = self.transactions.get_mut(sender) { let prev = set.worst_and_best(); let result = f(set, &self.scoring); @@ -362,9 +366,7 @@ impl Pool where /// will either get a `cancelled` or `invalid` notification. pub fn remove(&mut self, hash: &T::Hash, is_invalid: bool) -> Option> { if let Some(tx) = self.finalize_remove(hash) { - self.remove_from_set(tx.sender(), |set, scoring| { - set.remove(&tx, scoring) - }); + self.remove_from_set(tx.sender(), |set, scoring| set.remove(&tx, scoring)); if is_invalid { self.listener.invalid(&tx); } else { @@ -378,9 +380,7 @@ impl Pool where /// Removes all stalled transactions from given sender. 
fn remove_stalled>(&mut self, sender: &T::Sender, ready: &mut R) -> usize { - let removed_from_set = self.remove_from_set(sender, |transactions, scoring| { - transactions.cull(ready, scoring) - }); + let removed_from_set = self.remove_from_set(sender, |transactions, scoring| transactions.cull(ready, scoring)); match removed_from_set { Some(removed) => { @@ -390,7 +390,7 @@ impl Pool where self.listener.culled(&tx); } len - }, + } None => 0, } } @@ -403,13 +403,13 @@ impl Pool where for sender in senders { removed += self.remove_stalled(sender, &mut ready); } - }, + } None => { let senders = self.transactions.keys().cloned().collect::>(); for sender in senders { removed += self.remove_stalled(&sender, &mut ready); } - }, + } } removed @@ -427,27 +427,24 @@ impl Pool where /// Returns true if the pool is at it's capacity. pub fn is_full(&self) -> bool { - self.by_hash.len() >= self.options.max_count - || self.mem_usage >= self.options.max_mem_usage + self.by_hash.len() >= self.options.max_count || self.mem_usage >= self.options.max_mem_usage } /// Returns senders ordered by priority of their transactions. - pub fn senders(&self) -> impl Iterator { + pub fn senders(&self) -> impl Iterator { self.best_transactions.iter().map(|tx| tx.transaction.sender()) } /// Returns an iterator of pending (ready) transactions. pub fn pending>(&self, ready: R) -> PendingIterator<'_, T, R, S, L> { - PendingIterator { - ready, - best_transactions: self.best_transactions.clone(), - pool: self, - } + PendingIterator { ready, best_transactions: self.best_transactions.clone(), pool: self } } /// Returns pending (ready) transactions from given sender. 
pub fn pending_from_sender>(&self, ready: R, sender: &T::Sender) -> PendingIterator<'_, T, R, S, L> { - let best_transactions = self.transactions.get(sender) + let best_transactions = self + .transactions + .get(sender) .and_then(|transactions| transactions.worst_and_best()) .map(|(_, best)| ScoreWithRef::new(best.0, best.1)) .map(|s| { @@ -457,20 +454,12 @@ impl Pool where }) .unwrap_or_default(); - PendingIterator { - ready, - best_transactions, - pool: self, - } + PendingIterator { ready, best_transactions, pool: self } } /// Returns unprioritized list of ready transactions. pub fn unordered_pending>(&self, ready: R) -> UnorderedIterator<'_, T, R, S> { - UnorderedIterator { - ready, - senders: self.transactions.iter(), - transactions: None, - } + UnorderedIterator { ready, senders: self.transactions.iter(), transactions: None } } /// Update score of transactions of a particular sender. @@ -547,7 +536,8 @@ impl Pool where /// /// NOTE: the transactions are not removed from the queue. /// You might remove them later by calling `cull`. -pub struct UnorderedIterator<'a, T, R, S> where +pub struct UnorderedIterator<'a, T, R, S> +where T: VerifiedTransaction + 'a, S: Scoring + 'a, { @@ -556,7 +546,8 @@ pub struct UnorderedIterator<'a, T, R, S> where transactions: Option>>, } -impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> where +impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> +where T: VerifiedTransaction, R: Ready, S: Scoring, @@ -570,7 +561,7 @@ impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> where match self.ready.is_ready(&tx) { Readiness::Ready => { return Some(tx.transaction.clone()); - }, + } state => trace!("[{:?}] Ignoring {:?} transaction.", tx.hash(), state), } } @@ -583,11 +574,11 @@ impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> where } } - /// An iterator over all pending (ready) transactions. /// NOTE: the transactions are not removed from the queue. 
/// You might remove them later by calling `cull`. -pub struct PendingIterator<'a, T, R, S, L> where +pub struct PendingIterator<'a, T, R, S, L> +where T: VerifiedTransaction + 'a, S: Scoring + 'a, L: 'a, @@ -597,7 +588,8 @@ pub struct PendingIterator<'a, T, R, S, L> where pool: &'a Pool, } -impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> where +impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> +where T: VerifiedTransaction, R: Ready, S: Scoring, @@ -616,18 +608,20 @@ impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> where match tx_state { Readiness::Ready | Readiness::Stale => { // retrieve next one from the same sender. - let next = self.pool.transactions + let next = self + .pool + .transactions .get(best.transaction.sender()) .and_then(|s| s.find_next(&best.transaction, &self.pool.scoring)); if let Some((score, tx)) = next { self.best_transactions.insert(ScoreWithRef::new(score, tx)); } - }, + } _ => (), } if tx_state == Readiness::Ready { - return Some(best.transaction.transaction) + return Some(best.transaction.transaction); } trace!("[{:?}] Ignoring {:?} transaction.", best.transaction.hash(), tx_state); @@ -636,4 +630,3 @@ impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> where None } } - diff --git a/transaction-pool/src/ready.rs b/transaction-pool/src/ready.rs index 0bee5188d..45e85ca29 100644 --- a/transaction-pool/src/ready.rs +++ b/transaction-pool/src/ready.rs @@ -35,13 +35,17 @@ pub trait Ready { fn is_ready(&mut self, tx: &T) -> Readiness; } -impl Ready for F where F: FnMut(&T) -> Readiness { +impl Ready for F +where + F: FnMut(&T) -> Readiness, +{ fn is_ready(&mut self, tx: &T) -> Readiness { (*self)(tx) } } -impl Ready for (A, B) where +impl Ready for (A, B) +where A: Ready, B: Ready, { diff --git a/transaction-pool/src/replace.rs b/transaction-pool/src/replace.rs index bc44e8d90..a278edb80 100644 --- a/transaction-pool/src/replace.rs +++ b/transaction-pool/src/replace.rs @@ -16,40 
+16,34 @@ //! When queue limits are reached, decide whether to replace an existing transaction from the pool -use crate::{ - pool::Transaction, - scoring::Choice, -}; +use crate::{pool::Transaction, scoring::Choice}; /// Encapsulates a transaction to be compared, along with pooled transactions from the same sender pub struct ReplaceTransaction<'a, T> { - /// The transaction to be compared for replacement - pub transaction: &'a Transaction, - /// Other transactions currently in the pool for the same sender - pub pooled_by_sender: Option<&'a [Transaction]>, + /// The transaction to be compared for replacement + pub transaction: &'a Transaction, + /// Other transactions currently in the pool for the same sender + pub pooled_by_sender: Option<&'a [Transaction]>, } impl<'a, T> ReplaceTransaction<'a, T> { - /// Creates a new `ReplaceTransaction` - pub fn new(transaction: &'a Transaction, pooled_by_sender: Option<&'a [Transaction]>) -> Self { - ReplaceTransaction { - transaction, - pooled_by_sender, - } - } + /// Creates a new `ReplaceTransaction` + pub fn new(transaction: &'a Transaction, pooled_by_sender: Option<&'a [Transaction]>) -> Self { + ReplaceTransaction { transaction, pooled_by_sender } + } } impl<'a, T> ::std::ops::Deref for ReplaceTransaction<'a, T> { - type Target = Transaction; - fn deref(&self) -> &Self::Target { - &self.transaction - } + type Target = Transaction; + fn deref(&self) -> &Self::Target { + &self.transaction + } } /// Chooses whether a new transaction should replace an existing transaction if the pool is full. pub trait ShouldReplace { - /// Decides if `new` should push out `old` transaction from the pool. - /// - /// NOTE returning `InsertNew` here can lead to some transactions being accepted above pool limits. - fn should_replace(&self, old: &ReplaceTransaction<'_, T>, new: &ReplaceTransaction<'_, T>) -> Choice; + /// Decides if `new` should push out `old` transaction from the pool. 
+ /// + /// NOTE returning `InsertNew` here can lead to some transactions being accepted above pool limits. + fn should_replace(&self, old: &ReplaceTransaction<'_, T>, new: &ReplaceTransaction<'_, T>) -> Choice; } diff --git a/transaction-pool/src/scoring.rs b/transaction-pool/src/scoring.rs index c3edbb1ac..313bd6bc3 100644 --- a/transaction-pool/src/scoring.rs +++ b/transaction-pool/src/scoring.rs @@ -16,8 +16,8 @@ //! A transactions ordering abstraction. -use std::{cmp, fmt}; use crate::pool::Transaction; +use std::{cmp, fmt}; /// Represents a decision what to do with /// a new transaction that tries to enter the pool. @@ -100,7 +100,9 @@ pub trait Scoring: fmt::Debug { /// /// If you return `true` for given transaction it's going to be accepted even though /// the per-sender limit is exceeded. - fn should_ignore_sender_limit(&self, _new: &T) -> bool { false } + fn should_ignore_sender_limit(&self, _new: &T) -> bool { + false + } } /// A score with a reference to the transaction. @@ -121,17 +123,13 @@ impl ScoreWithRef { impl Clone for ScoreWithRef { fn clone(&self) -> Self { - ScoreWithRef { - score: self.score.clone(), - transaction: self.transaction.clone(), - } + ScoreWithRef { score: self.score.clone(), transaction: self.transaction.clone() } } } impl Ord for ScoreWithRef { fn cmp(&self, other: &Self) -> cmp::Ordering { - other.score.cmp(&self.score) - .then(self.transaction.insertion_id.cmp(&other.transaction.insertion_id)) + other.score.cmp(&self.score).then(self.transaction.insertion_id.cmp(&other.transaction.insertion_id)) } } @@ -141,7 +139,7 @@ impl PartialOrd for ScoreWithRef { } } -impl PartialEq for ScoreWithRef { +impl PartialEq for ScoreWithRef { fn eq(&self, other: &Self) -> bool { self.score == other.score && self.transaction.insertion_id == other.transaction.insertion_id } @@ -149,19 +147,12 @@ impl PartialEq for ScoreWithRef { impl Eq for ScoreWithRef {} - #[cfg(test)] mod tests { use super::*; fn score(score: u64, insertion_id: u64) -> 
ScoreWithRef<(), u64> { - ScoreWithRef { - score, - transaction: Transaction { - insertion_id, - transaction: Default::default(), - }, - } + ScoreWithRef { score, transaction: Transaction { insertion_id, transaction: Default::default() } } } #[test] diff --git a/transaction-pool/src/tests/helpers.rs b/transaction-pool/src/tests/helpers.rs index 73d11f9e5..f757ac8d7 100644 --- a/transaction-pool/src/tests/helpers.rs +++ b/transaction-pool/src/tests/helpers.rs @@ -17,9 +17,9 @@ use std::cmp; use std::collections::HashMap; -use ethereum_types::{H160 as Sender, U256}; -use crate::{pool, scoring, Scoring, ShouldReplace, ReplaceTransaction, Ready, Readiness}; use super::Transaction; +use crate::{pool, scoring, Readiness, Ready, ReplaceTransaction, Scoring, ShouldReplace}; +use ethereum_types::{H160 as Sender, U256}; #[derive(Debug, Default)] pub struct DummyScoring { @@ -28,9 +28,7 @@ pub struct DummyScoring { impl DummyScoring { pub fn always_insert() -> Self { - DummyScoring { - always_insert: true, - } + DummyScoring { always_insert: true } } } @@ -54,7 +52,12 @@ impl Scoring for DummyScoring { } } - fn update_scores(&self, txs: &[pool::Transaction], scores: &mut [Self::Score], change: scoring::Change) { + fn update_scores( + &self, + txs: &[pool::Transaction], + scores: &mut [Self::Score], + change: scoring::Change, + ) { if let scoring::Change::Event(_) = change { // In case of event reset all scores to 0 for i in 0..txs.len() { @@ -74,7 +77,11 @@ impl Scoring for DummyScoring { } impl ShouldReplace for DummyScoring { - fn should_replace(&self, old: &ReplaceTransaction<'_, Transaction>, new: &ReplaceTransaction<'_, Transaction>) -> scoring::Choice { + fn should_replace( + &self, + old: &ReplaceTransaction<'_, Transaction>, + new: &ReplaceTransaction<'_, Transaction>, + ) -> scoring::Choice { if self.always_insert { scoring::Choice::InsertNew } else if new.gas_price > old.gas_price { @@ -105,7 +112,7 @@ impl Ready for NonceReady { cmp::Ordering::Equal => { *nonce += 
1.into(); Readiness::Ready - }, + } cmp::Ordering::Less => Readiness::Stale, } } diff --git a/transaction-pool/src/tests/mod.rs b/transaction-pool/src/tests/mod.rs index 7cc7c5553..db5ea2885 100644 --- a/transaction-pool/src/tests/mod.rs +++ b/transaction-pool/src/tests/mod.rs @@ -22,8 +22,8 @@ use self::tx_builder::TransactionBuilder; use std::sync::Arc; -use ethereum_types::{H256, U256, Address}; use super::*; +use ethereum_types::{Address, H256, U256}; #[derive(Debug, PartialEq)] pub struct Transaction { @@ -39,9 +39,15 @@ impl VerifiedTransaction for Transaction { type Hash = H256; type Sender = Address; - fn hash(&self) -> &H256 { &self.hash } - fn mem_usage(&self) -> usize { self.mem_usage } - fn sender(&self) -> &Address { &self.sender } + fn hash(&self) -> &H256 { + &self.hash + } + fn mem_usage(&self) -> usize { + self.mem_usage + } + fn sender(&self) -> &Address { + &self.sender + } } pub type SharedTransaction = Arc; @@ -50,15 +56,14 @@ type TestPool = Pool; impl TestPool { pub fn with_limit(max_count: usize) -> Self { - Self::with_options(Options { - max_count, - ..Default::default() - }) + Self::with_options(Options { max_count, ..Default::default() }) } } -fn import, L: Listener>(txq: &mut Pool, tx: Transaction) - -> Result, Error<::Hash>> { +fn import, L: Listener>( + txq: &mut Pool, + tx: Transaction, +) -> Result, Error<::Hash>> { txq.import(tx, &mut DummyScoring::default()) } @@ -67,32 +72,20 @@ fn should_clear_queue() { // given let b = TransactionBuilder::default(); let mut txq = TestPool::default(); - assert_eq!(txq.light_status(), LightStatus { - mem_usage: 0, - transaction_count: 0, - senders: 0, - }); + assert_eq!(txq.light_status(), LightStatus { mem_usage: 0, transaction_count: 0, senders: 0 }); let tx1 = b.tx().nonce(0).new(); let tx2 = b.tx().nonce(1).mem_usage(1).new(); // add import(&mut txq, tx1).unwrap(); import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - mem_usage: 1, - transaction_count: 2, - senders: 
1, - }); + assert_eq!(txq.light_status(), LightStatus { mem_usage: 1, transaction_count: 2, senders: 1 }); // when txq.clear(); // then - assert_eq!(txq.light_status(), LightStatus { - mem_usage: 0, - transaction_count: 0, - senders: 0, - }); + assert_eq!(txq.light_status(), LightStatus { mem_usage: 0, transaction_count: 0, senders: 0 }); } #[test] @@ -130,10 +123,7 @@ fn should_replace_transaction() { #[test] fn should_reject_if_above_count() { let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { - max_count: 1, - ..Default::default() - }); + let mut txq = TestPool::with_options(Options { max_count: 1, ..Default::default() }); // Reject second let tx1 = b.tx().nonce(0).new(); @@ -156,10 +146,7 @@ fn should_reject_if_above_count() { #[test] fn should_reject_if_above_mem_usage() { let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { - max_mem_usage: 1, - ..Default::default() - }); + let mut txq = TestPool::with_options(Options { max_mem_usage: 1, ..Default::default() }); // Reject second let tx1 = b.tx().nonce(1).mem_usage(1).new(); @@ -182,10 +169,7 @@ fn should_reject_if_above_mem_usage() { #[test] fn should_reject_if_above_sender_count() { let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { - max_per_sender: 1, - ..Default::default() - }); + let mut txq = TestPool::with_options(Options { max_per_sender: 1, ..Default::default() }); // Reject second let tx1 = b.tx().nonce(1).new(); @@ -234,16 +218,8 @@ fn should_construct_pending() { import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { - stalled: 0, - pending: 9, - future: 2, - }); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 3, - pending: 6, - future: 2, - }); + assert_eq!(txq.status(NonceReady::default()), Status { stalled: 0, pending: 9, future: 2 }); + 
assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 3, pending: 6, future: 2 }); // when let mut current_gas = U256::zero(); @@ -306,16 +282,8 @@ fn should_return_unordered_iterator() { let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { - stalled: 0, - pending: 9, - future: 2, - }); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 3, - pending: 6, - future: 2, - }); + assert_eq!(txq.status(NonceReady::default()), Status { stalled: 0, pending: 9, future: 2 }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 3, pending: 6, future: 2 }); // when let all: Vec<_> = txq.unordered_pending(NonceReady::default()).collect(); @@ -333,7 +301,9 @@ fn should_return_unordered_iterator() { vec![chain3.clone(), chain2.clone(), chain1.clone()], vec![chain3.clone(), chain1.clone(), chain2.clone()], vec![chain1.clone(), chain3.clone(), chain2.clone()], - ].into_iter().map(|mut v| { + ] + .into_iter() + .map(|mut v| { let mut first = v.pop().unwrap(); for mut x in v { first.append(&mut x); @@ -370,16 +340,8 @@ fn should_update_scoring_correctly() { import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { - stalled: 0, - pending: 9, - future: 2, - }); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 3, - pending: 6, - future: 2, - }); + assert_eq!(txq.status(NonceReady::default()), Status { stalled: 0, pending: 9, future: 2 }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 3, pending: 6, future: 2 }); txq.update_scores(&Address::zero(), ()); @@ -441,26 +403,14 @@ fn should_cull_stalled_transactions() { import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - assert_eq!(txq.status(NonceReady::new(1)), Status { - 
stalled: 2, - pending: 2, - future: 2, - }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 2, pending: 2, future: 2 }); // when assert_eq!(txq.cull(None, NonceReady::new(1)), 2); // then - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 0, - pending: 2, - future: 2, - }); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 4, - senders: 2, - mem_usage: 0, - }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 0, pending: 2, future: 2 }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 4, senders: 2, mem_usage: 0 }); } #[test] @@ -476,27 +426,15 @@ fn should_cull_stalled_transactions_from_a_sender() { import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - assert_eq!(txq.status(NonceReady::new(2)), Status { - stalled: 4, - pending: 1, - future: 0, - }); + assert_eq!(txq.status(NonceReady::new(2)), Status { stalled: 4, pending: 1, future: 0 }); // when let sender = Address::zero(); assert_eq!(txq.cull(Some(&[sender]), NonceReady::new(2)), 2); // then - assert_eq!(txq.status(NonceReady::new(2)), Status { - stalled: 2, - pending: 1, - future: 0, - }); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 3, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.status(NonceReady::new(2)), Status { stalled: 2, pending: 1, future: 0 }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 3, senders: 1, mem_usage: 0 }); } #[test] @@ -509,27 +447,15 @@ fn should_re_insert_after_cull() { import(&mut txq, b.tx().nonce(1).new()).unwrap(); import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 2, - pending: 2, - future: 0, - }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 2, pending: 2, future: 0 }); // when assert_eq!(txq.cull(None, NonceReady::new(1)), 2); - 
assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 0, - pending: 2, - future: 0, - }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 0, pending: 2, future: 0 }); import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - assert_eq!(txq.status(NonceReady::new(1)), Status { - stalled: 2, - pending: 2, - future: 0, - }); + assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 2, pending: 2, future: 0 }); } #[test] @@ -568,26 +494,15 @@ fn should_return_is_full() { fn should_import_even_if_limit_is_reached_and_should_replace_returns_insert_new() { // given let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options { - max_count: 1, - ..Default::default() - }); + let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options { max_count: 1, ..Default::default() }); txq.import(b.tx().nonce(0).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); // when txq.import(b.tx().nonce(1).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); // then - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 2, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 2, senders: 1, mem_usage: 0 }); } #[test] @@ -596,16 +511,9 @@ fn should_not_import_even_if_limit_is_reached_and_should_replace_returns_false() // given let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::default(), Options { - max_count: 1, - ..Default::default() - }); + let mut txq = TestPool::with_scoring(DummyScoring::default(), Options { max_count: 1, ..Default::default() }); import(&mut txq, 
b.tx().nonce(0).gas_price(5).new()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); // when let err = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap_err(); @@ -618,44 +526,31 @@ fn should_not_import_even_if_limit_is_reached_and_should_replace_returns_false() "0x5".into() ) ); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); } #[test] fn should_import_even_if_sender_limit_is_reached() { // given let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options { - max_count: 1, - max_per_sender: 1, - ..Default::default() - }); + let mut txq = TestPool::with_scoring( + DummyScoring::always_insert(), + Options { max_count: 1, max_per_sender: 1, ..Default::default() }, + ); txq.import(b.tx().nonce(0).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 1, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); // when txq.import(b.tx().nonce(1).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); // then - assert_eq!(txq.light_status(), LightStatus { - transaction_count: 2, - senders: 1, - mem_usage: 0, - }); + assert_eq!(txq.light_status(), LightStatus { transaction_count: 2, senders: 1, mem_usage: 0 }); } mod listener { use std::cell::RefCell; - use std::rc::Rc; use std::fmt; + use std::rc::Rc; use super::*; @@ -693,11 +588,11 @@ mod listener { let b = TransactionBuilder::default(); let listener = MyListener::default(); let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), 
Options { - max_per_sender: 1, - max_count: 2, - ..Default::default() - }); + let mut txq = Pool::new( + listener, + DummyScoring::default(), + Options { max_per_sender: 1, max_count: 2, ..Default::default() }, + ); assert!(results.borrow().is_empty()); // Regular import diff --git a/transaction-pool/src/tests/tx_builder.rs b/transaction-pool/src/tests/tx_builder.rs index dae2bb248..83f7b13a0 100644 --- a/transaction-pool/src/tests/tx_builder.rs +++ b/transaction-pool/src/tests/tx_builder.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use super::{Transaction, U256, H256, Address}; +use super::{Address, Transaction, H256, U256}; use ethereum_types::BigEndianHash; #[derive(Debug, Default, Clone)] @@ -52,7 +52,9 @@ impl TransactionBuilder { } pub fn new(self) -> Transaction { - let hash: U256 = self.nonce ^ (U256::from(100) * self.gas_price) ^ (U256::from(100_000) * U256::from(self.sender.to_low_u64_be())); + let hash: U256 = self.nonce + ^ (U256::from(100) * self.gas_price) + ^ (U256::from(100_000) * U256::from(self.sender.to_low_u64_be())); Transaction { hash: H256::from_uint(&hash), nonce: self.nonce, diff --git a/transaction-pool/src/transactions.rs b/transaction-pool/src/transactions.rs index 8256bf33a..5fc963d78 100644 --- a/transaction-pool/src/transactions.rs +++ b/transaction-pool/src/transactions.rs @@ -16,31 +16,22 @@ use std::{fmt, mem}; -use smallvec::SmallVec; use log::warn; +use smallvec::SmallVec; use crate::{ - ready::{Ready, Readiness}, - scoring::{self, Scoring}, pool::Transaction, + ready::{Readiness, Ready}, + scoring::{self, Scoring}, }; #[derive(Debug)] pub enum AddResult { Ok(T), TooCheapToEnter(T, S), - TooCheap { - old: T, - new: T, - }, - Replaced { - old: T, - new: T, - }, - PushedOut { - old: T, - new: T, - }, + TooCheap { old: T, new: T }, + Replaced { old: T, new: T }, + PushedOut { old: T, new: T }, } /// Represents all transactions from a particular sender 
ordered by nonce. @@ -54,10 +45,7 @@ pub struct Transactions> { impl> Default for Transactions { fn default() -> Self { - Transactions { - transactions: Default::default(), - scores: Default::default(), - } + Transactions { transactions: Default::default(), scores: Default::default() } } } @@ -96,7 +84,12 @@ impl> Transactions { }) } - fn push_cheapest_transaction(&mut self, tx: Transaction, scoring: &S, max_count: usize) -> AddResult, S::Score> { + fn push_cheapest_transaction( + &mut self, + tx: Transaction, + scoring: &S, + max_count: usize, + ) -> AddResult, S::Score> { let index = self.transactions.len(); if index == max_count && !scoring.should_ignore_sender_limit(&tx) { let min_score = self.scores[index - 1].clone(); @@ -122,16 +115,13 @@ impl> Transactions { // Insert at the end. if index == self.transactions.len() { - return self.push_cheapest_transaction(new, scoring, max_count) + return self.push_cheapest_transaction(new, scoring, max_count); } // Decide if the transaction should replace some other. match scoring.choose(&self.transactions[index], &new) { // New transaction should be rejected - scoring::Choice::RejectNew => AddResult::TooCheap { - old: self.transactions[index].clone(), - new, - }, + scoring::Choice::RejectNew => AddResult::TooCheap { old: self.transactions[index].clone(), new }, // New transaction should be kept along with old ones. 
scoring::Choice::InsertNew => { self.transactions.insert(index, new.clone()); @@ -141,26 +131,24 @@ impl> Transactions { if self.transactions.len() > max_count { let old = self.transactions.pop().expect("len is non-zero"); self.scores.pop(); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::RemovedAt(self.transactions.len())); + scoring.update_scores( + &self.transactions, + &mut self.scores, + scoring::Change::RemovedAt(self.transactions.len()), + ); - AddResult::PushedOut { - old, - new, - } + AddResult::PushedOut { old, new } } else { AddResult::Ok(new) } - }, + } // New transaction is replacing some other transaction already in the queue. scoring::Choice::ReplaceOld => { let old = mem::replace(&mut self.transactions[index], new.clone()); scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::ReplacedAt(index)); - AddResult::Replaced { - old, - new, - } - }, + AddResult::Replaced { old, new } + } } } @@ -170,7 +158,7 @@ impl> Transactions { Err(_) => { warn!("Attempting to remove non-existent transaction {:?}", tx); return false; - }, + } }; self.transactions.remove(index); @@ -191,7 +179,7 @@ impl> Transactions { match ready.is_ready(tx) { Readiness::Stale => { first_non_stalled += 1; - }, + } Readiness::Ready | Readiness::Future => break, } } @@ -207,7 +195,7 @@ impl> Transactions { for _ in 0..first_non_stalled { self.scores.pop(); result.push( - self.transactions.pop().expect("first_non_stalled is never greater than transactions.len(); qed") + self.transactions.pop().expect("first_non_stalled is never greater than transactions.len(); qed"), ); } diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md new file mode 100644 index 000000000..67629508f --- /dev/null +++ b/triehash/CHANGELOG.md @@ -0,0 +1,12 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.8.1] - 2019-10-24 +- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/214) +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/triehash/benches/triehash.rs b/triehash/benches/triehash.rs index 8930f473e..fea116a46 100644 --- a/triehash/benches/triehash.rs +++ b/triehash/benches/triehash.rs @@ -42,7 +42,7 @@ fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec fn random_value(seed: &mut H256) -> Vec { *seed = H256(keccak256(seed.as_bytes())); match seed[0] % 2 { - 1 => vec![seed[31];1], + 1 => vec![seed[31]; 1], _ => seed.as_bytes().to_vec(), } } diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs index 94bcbfaed..41b2a0d17 100644 --- a/triehash/src/lib.rs +++ b/triehash/src/lib.rs @@ -26,10 +26,7 @@ use hash_db::Hasher; use rlp::RlpStream; fn shared_prefix_len(first: &[T], second: &[T]) -> usize { - first.iter() - .zip(second.iter()) - .position(|(f, s)| f != s) - .unwrap_or_else(|| cmp::min(first.len(), second.len())) + first.iter().zip(second.iter()).position(|(f, s)| f != s).unwrap_or_else(|| cmp::min(first.len(), second.len())) } /// Generates a trie root hash for a vector of values @@ -90,9 +87,7 @@ where ::Out: cmp::Ord, { // first put elements into btree to sort them and to remove duplicates - let input = input - .into_iter() - .collect::>(); + let input = input.into_iter().collect::>(); let mut nibbles = Vec::with_capacity(input.keys().map(|k| k.as_ref().len()).sum::() * 2); let mut lens = Vec::with_capacity(input.len() + 1); @@ -106,9 +101,7 @@ where } // then move them to a vector - let input = input.into_iter().zip(lens.windows(2)) - .map(|((_, v), w)| (&nibbles[w[0]..w[1]], v)) - .collect::>(); + let input = input.into_iter().zip(lens.windows(2)).map(|((_, v), w)| (&nibbles[w[0]..w[1]], v)).collect::>(); let mut stream = RlpStream::new(); hash256rlp::(&input, 
0, &mut stream); @@ -209,13 +202,12 @@ where } // get length of the longest shared prefix in slice keys - let shared_prefix = input.iter() + let shared_prefix = input + .iter() // skip first tuple .skip(1) // get minimum number of shared nibbles between first and each successive - .fold(key.len(), | acc, &(ref k, _) | { - cmp::min(shared_prefix_len(key, k.as_ref()), acc) - }); + .fold(key.len(), |acc, &(ref k, _)| cmp::min(shared_prefix_len(key, k.as_ref()), acc)); // if shared prefix is higher than current prefix append its // new part of the key to the stream @@ -237,17 +229,15 @@ where // iterate over all possible nibbles for i in 0..16 { // count how many successive elements have same next nibble - let len = input - .iter() - .skip(begin) - .take_while(|pair| pair.0.as_ref()[pre_len] == i) - .count(); + let len = input.iter().skip(begin).take_while(|pair| pair.0.as_ref()[pre_len] == i).count(); // if at least 1 successive element has the same nibble // append their suffixes match len { - 0 => { stream.append_empty_data(); }, - _ => hash256aux::(&input[begin..(begin + len)], pre_len + 1, stream) + 0 => { + stream.append_empty_data(); + } + _ => hash256aux::(&input[begin..(begin + len)], pre_len + 1, stream), } begin += len; } @@ -271,16 +261,16 @@ where let out = s.out(); match out.len() { 0..=31 => stream.append_raw(&out, 1), - _ => stream.append(&H::hash(&out).as_ref()) + _ => stream.append(&H::hash(&out).as_ref()), }; } #[cfg(test)] mod tests { - use super::{trie_root, shared_prefix_len, hex_prefix_encode}; - use keccak_hasher::KeccakHasher; + use super::{hex_prefix_encode, shared_prefix_len, trie_root}; use ethereum_types::H256; use hex_literal::hex; + use keccak_hasher::KeccakHasher; #[test] fn test_hex_prefix_encode() { @@ -318,9 +308,10 @@ mod tests { #[test] fn simple_test() { assert_eq!( - trie_root::(vec![ - (b"A", b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8]) - ]), + trie_root::(vec![( + b"A", + 
b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8] + )]), H256::from(hex!("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")).as_ref(), ); } @@ -343,22 +334,22 @@ mod tests { #[test] fn test_shared_prefix() { - let a = vec![1,2,3,4,5,6]; - let b = vec![4,2,3,4,5,6]; + let a = vec![1, 2, 3, 4, 5, 6]; + let b = vec![4, 2, 3, 4, 5, 6]; assert_eq!(shared_prefix_len(&a, &b), 0); } #[test] fn test_shared_prefix2() { - let a = vec![1,2,3,3,5]; - let b = vec![1,2,3]; + let a = vec![1, 2, 3, 3, 5]; + let b = vec![1, 2, 3]; assert_eq!(shared_prefix_len(&a, &b), 3); } #[test] fn test_shared_prefix3() { - let a = vec![1,2,3,4,5,6]; - let b = vec![1,2,3,4,5,6]; + let a = vec![1, 2, 3, 4, 5, 6]; + let b = vec![1, 2, 3, 4, 5, 6]; assert_eq!(shared_prefix_len(&a, &b), 6); } } diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md new file mode 100644 index 000000000..a4214eaf9 --- /dev/null +++ b/uint/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.8.2] - 2019-10-24 +### Fixed +- Fixed 2018 edition imports (https://github.com/paritytech/parity-common/pull/237) +- Removed `uninitialized` usage (https://github.com/paritytech/parity-common/pull/238) +### Dependencies +- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +### Changed +- Modified AsRef impl (https://github.com/paritytech/parity-common/pull/196) diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index 61f24e93d..ea0284cdd 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -12,7 +12,6 @@ //! rustup run cargo bench //! 
``` - use criterion::{criterion_group, criterion_main}; use uint::{construct_uint, uint_full_mul_reg}; @@ -107,7 +106,7 @@ fn u128_div(c: &mut Criterion) { black_box(x / u128::from(*z)) }) }, - vec![(0u64, u64::max_value(), 100u64), (u64::max_value(), u64::max_value(), 99), (42, 42, 100500)], + vec![(0u64, u64::max_value(), 100u64), (u64::max_value(), u64::max_value(), 99), (42, 42, 100500)], ), ); } @@ -159,10 +158,7 @@ fn u256_mul(c: &mut Criterion) { vec![ (U256::max_value(), 1u64), (U256::from(3), u64::max_value()), - ( - U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), - 173, - ), + (U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), 173), ], ), ); @@ -248,14 +244,8 @@ fn u256_rem(c: &mut Criterion) { U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), ), ( - U256::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ) - .unwrap(), - U256::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0", - ) - .unwrap(), + U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0").unwrap(), ), ], ), @@ -267,53 +257,31 @@ fn u512_pairs() -> Vec<(U512, U512)> { (U512::from(1u64), U512::from(0u64)), (U512::from(u64::max_value()), U512::from(u64::from(u32::max_value()) + 1)), ( - U512([ - 12767554894655550452, - 16333049135534778834, - 140317443000293558, - 598963, - 0, - 0, - 0, - 0, - ]), + U512([12767554894655550452, 16333049135534778834, 140317443000293558, 598963, 0, 0, 0, 0]), U512([0, 0, 0, 0, 2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), ), ( - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") - .unwrap(), - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0") - .unwrap(), + 
U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0").unwrap(), ), ] } fn u512_add(c: &mut Criterion) { - c.bench( - "u512_add", - ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x + y)), u512_pairs()), - ); + c.bench("u512_add", ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x + y)), u512_pairs())); } fn u512_sub(c: &mut Criterion) { c.bench( "u512_sub", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x.overflowing_sub(*y).0)), - u512_pairs(), - ), + ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x.overflowing_sub(*y).0)), u512_pairs()), ); } fn u512_mul(c: &mut Criterion) { c.bench( "u512_mul", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x.overflowing_mul(*y).0)), - u512_pairs(), - ), + ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x.overflowing_mul(*y).0)), u512_pairs()), ); } @@ -368,12 +336,8 @@ fn u512_rem(c: &mut Criterion) { fn conversions(c: &mut Criterion) { c.bench( "conversions biguint vs gmp", - ParameterizedBenchmark::new( - "BigUint", - |b, i| bench_convert_to_biguit(b, *i), - vec![0, 42, u64::max_value()], - ) - .with_function("gmp", |b, i| bench_convert_to_gmp(b, *i)), + ParameterizedBenchmark::new("BigUint", |b, i| bench_convert_to_biguit(b, *i), vec![0, 42, u64::max_value()]) + .with_function("gmp", |b, i| bench_convert_to_gmp(b, *i)), ); } @@ -405,19 +369,13 @@ fn u512_mul_u32_vs_u64(c: &mut Criterion) { } fn bench_u512_mul_u32(b: &mut Bencher, i: u32) { - let x = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - b.iter(|| { - black_box(x * i) - }); + let x = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + b.iter(|| black_box(x * i)); } fn bench_u512_mul_u64(b: &mut Bencher, i: u64) { - let x = - 
U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - b.iter(|| { - black_box(x * i) - }); + let x = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + b.iter(|| black_box(x * i)); } fn mulmod_u512_vs_biguint_vs_gmp(c: &mut Criterion) { @@ -436,10 +394,8 @@ fn mulmod_u512_vs_biguint_vs_gmp(c: &mut Criterion) { } fn bench_biguint_mulmod(b: &mut Bencher, z: U256) { - let x = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let y = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let x = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let y = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); b.iter(|| { let w = to_biguint(x) * to_biguint(y); black_box(from_biguint(w % to_biguint(z))) @@ -447,10 +403,8 @@ fn bench_biguint_mulmod(b: &mut Bencher, z: U256) { } fn bench_gmp_mulmod(b: &mut Bencher, z: U256) { - let x = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let y = - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let x = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let y = U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); b.iter(|| { let w = to_gmp(x) * to_gmp(y); black_box(from_gmp(w % to_gmp(z))) @@ -458,10 +412,8 @@ fn bench_gmp_mulmod(b: &mut Bencher, z: U256) { } fn bench_u512_mulmod(b: &mut Bencher, z: U256) { - let x = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let y = - U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let x = U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + let y = 
U512::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); let z = U512([z.0[0], z.0[1], z.0[2], z.0[3], 0, 0, 0, 0]); b.iter(|| { let w = x.overflowing_mul(y).0; @@ -472,9 +424,7 @@ fn bench_u512_mulmod(b: &mut Bencher, z: U256) { // NOTE: uses native `u128` and does not measure this crates performance, // but might be interesting as a comparison. fn u128_mul(c: &mut Criterion) { - c.bench_function("u128_mul", |b| { - b.iter(|| black_box(12345u128 * u128::from(u64::max_value()))) - }); + c.bench_function("u128_mul", |b| b.iter(|| black_box(12345u128 * u128::from(u64::max_value())))); } fn u256_bit_and(c: &mut Criterion) { @@ -658,8 +608,8 @@ fn u256_from_le(c: &mut Criterion) { c.bench_function("u256_from_le", |b| { b.iter(|| { let raw = black_box([ - 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, - 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, + 101, 103, 107, 109, 113, 127, ]); black_box(U256::from_little_endian(&raw[..])) }) @@ -670,8 +620,8 @@ fn u256_from_be(c: &mut Criterion) { c.bench_function("u256_from_be", |b| { b.iter(|| { let raw = black_box([ - 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, - 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, + 101, 103, 107, 109, 113, 127, ]); black_box(U256::from_big_endian(&raw[..])) }) @@ -680,14 +630,11 @@ fn u256_from_be(c: &mut Criterion) { fn from_fixed_array(c: &mut Criterion) { let ary512: [u8; 64] = [ - 255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 67, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 123, - ]; - let ary256: [u8; 32] = [ - 255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
121, 0, 0, 0, 0, 0, 213, 0, 0, - 0, 0, 0, 0, + 255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, ]; + let ary256: [u8; 32] = + [255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, 0, 0, 0, 0]; c.bench_function("from_fixed_array", move |b| { b.iter(|| { let _: U512 = black_box(ary512.into()); diff --git a/uint/examples/modular.rs b/uint/examples/modular.rs index 1364bd766..abc754f82 100644 --- a/uint/examples/modular.rs +++ b/uint/examples/modular.rs @@ -6,8 +6,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - - #[macro_use] extern crate uint; @@ -21,9 +19,8 @@ fn main() { // imagine the field 0..p // where the p is defined below // (it's a prime!) - let p = U256::from_dec_str( - "38873241744847760218045702002058062581688990428170398542849190507947196700873" - ).expect("p to be a good number in the example"); + let p = U256::from_dec_str("38873241744847760218045702002058062581688990428170398542849190507947196700873") + .expect("p to be a good number in the example"); // then, on this field, // (p-1) + (p+1) = 0 @@ -50,7 +47,7 @@ fn main() { let multiplicator = 3; let mul = { let mut result = p_minus_1; - for _ in 0..multiplicator-1 { + for _ in 0..multiplicator - 1 { result = (p_minus_1 + result) % p; } result diff --git a/uint/src/lib.rs b/uint/src/lib.rs index e72c32d5f..bba720be8 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -21,11 +21,11 @@ pub use core as core_; #[doc(hidden)] pub use rustc_hex; -#[cfg(feature="quickcheck")] +#[cfg(feature = "quickcheck")] #[doc(hidden)] pub use qc; -#[cfg(feature="quickcheck")] +#[cfg(feature = "quickcheck")] #[doc(hidden)] pub use rand; @@ -35,5 +35,6 @@ pub use static_assertions; pub use crunchy::unroll; #[macro_use] +#[rustfmt::skip] mod uint; pub use 
crate::uint::*; diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 274cc529c..64990bf5c 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -47,7 +47,7 @@ macro_rules! impl_map_from { From::from(value as $to) } } - } + }; } #[macro_export] @@ -61,27 +61,32 @@ macro_rules! impl_try_from_for_primitive { fn try_from(u: $from) -> $crate::core_::result::Result<$to, &'static str> { let $from(arr) = u; if !u.fits_word() || arr[0] > <$to>::max_value() as u64 { - Err(concat!("integer overflow when casting to ", stringify!($to))) + Err(concat!( + "integer overflow when casting to ", + stringify!($to) + )) } else { Ok(arr[0] as $to) } } } - } + }; } #[macro_export] #[doc(hidden)] macro_rules! uint_overflowing_binop { - ($name:ident, $n_words: tt, $self_expr: expr, $other: expr, $fn:expr) => ({ - use $crate::{core_ as core}; + ($name:ident, $n_words: tt, $self_expr: expr, $other: expr, $fn:expr) => {{ + use $crate::core_ as core; let $name(ref me) = $self_expr; let $name(ref you) = $other; let mut ret = [0u64; $n_words]; let ret_ptr = &mut ret as *mut [u64; $n_words] as *mut u64; let mut carry = 0u64; - $crate::static_assertions::const_assert!(core::isize::MAX as usize / core::mem::size_of::() > $n_words); + $crate::static_assertions::const_assert!( + core::isize::MAX as usize / core::mem::size_of::() > $n_words + ); // `unroll!` is recursive, but doesn’t use `$crate::unroll`, so we need to ensure that it // is in scope unqualified. @@ -113,7 +118,7 @@ macro_rules! uint_overflowing_binop { } ($name(ret), carry > 0) - }) + }}; } #[macro_export] @@ -125,57 +130,60 @@ macro_rules! 
uint_full_mul_reg { ($name:ident, $n_words:tt, $self_expr:expr, $other:expr) => { $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other, |_, _| true); }; - ($name:ident, $n_words:tt, $self_expr:expr, $other:expr, $check:expr) => ({{ - #![allow(unused_assignments)] + ($name:ident, $n_words:tt, $self_expr:expr, $other:expr, $check:expr) => {{ + { + #![allow(unused_assignments)] - let $name(ref me) = $self_expr; - let $name(ref you) = $other; - let mut ret = [0u64; $n_words * 2]; + let $name(ref me) = $self_expr; + let $name(ref you) = $other; + let mut ret = [0u64; $n_words * 2]; - use $crate::unroll; - unroll! { - for i in 0..$n_words { - let mut carry = 0u64; - let b = you[i]; - - unroll! { - for j in 0..$n_words { - if $check(me[j], carry) { - let a = me[j]; - - let (hi, low) = Self::split_u128(a as u128 * b as u128); - - let overflow = { - let existing_low = &mut ret[i + j]; - let (low, o) = low.overflowing_add(*existing_low); - *existing_low = low; - o - }; - - carry = { - let existing_hi = &mut ret[i + j + 1]; - let hi = hi + overflow as u64; - let (hi, o0) = hi.overflowing_add(carry); - let (hi, o1) = hi.overflowing_add(*existing_hi); - *existing_hi = hi; - - (o0 | o1) as u64 + use $crate::unroll; + unroll! { + for i in 0..$n_words { + let mut carry = 0u64; + let b = you[i]; + + unroll! { + for j in 0..$n_words { + if $check(me[j], carry) { + let a = me[j]; + + let (hi, low) = Self::split_u128(a as u128 * b as u128); + + let overflow = { + let existing_low = &mut ret[i + j]; + let (low, o) = low.overflowing_add(*existing_low); + *existing_low = low; + o + }; + + carry = { + let existing_hi = &mut ret[i + j + 1]; + let hi = hi + overflow as u64; + let (hi, o0) = hi.overflowing_add(carry); + let (hi, o1) = hi.overflowing_add(*existing_hi); + *existing_hi = hi; + + (o0 | o1) as u64 + } } } } } } - } - ret - }}); + ret + } + }}; } #[macro_export] #[doc(hidden)] macro_rules! 
uint_overflowing_mul { - ($name:ident, $n_words: tt, $self_expr: expr, $other: expr) => ({ - let ret: [u64; $n_words * 2] = $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other); + ($name:ident, $n_words: tt, $self_expr: expr, $other: expr) => {{ + let ret: [u64; $n_words * 2] = + $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other); // The safety of this is enforced by the compiler let ret: [[u64; $n_words]; 2] = unsafe { $crate::core_::mem::transmute(ret) }; @@ -196,25 +204,21 @@ macro_rules! uint_overflowing_mul { } ($name(ret[0]), any_nonzero(&ret[1])) - }) + }}; } #[macro_export] #[doc(hidden)] macro_rules! overflowing { - ($op: expr, $overflow: expr) => ( - { - let (overflow_x, overflow_overflow) = $op; - $overflow |= overflow_overflow; - overflow_x - } - ); - ($op: expr) => ( - { - let (overflow_x, _overflow_overflow) = $op; - overflow_x - } - ); + ($op: expr, $overflow: expr) => {{ + let (overflow_x, overflow_overflow) = $op; + $overflow |= overflow_overflow; + overflow_x + }}; + ($op: expr) => {{ + let (overflow_x, _overflow_overflow) = $op; + overflow_x + }}; } #[macro_export] @@ -223,8 +227,8 @@ macro_rules! panic_on_overflow { ($name: expr) => { if $name { panic!("arithmetic operation overflow") - } - } + } + }; } #[macro_export] @@ -281,7 +285,7 @@ macro_rules! impl_mul_from { *self = result } } - } + }; } #[macro_export] @@ -334,7 +338,7 @@ macro_rules! impl_mul_for_primitive { *self = result } } - } + }; } #[macro_export] @@ -1561,14 +1565,14 @@ macro_rules! impl_std_for_uint { s.parse().unwrap() } } - } + }; } #[cfg(not(feature = "std"))] #[macro_export] #[doc(hidden)] macro_rules! impl_std_for_uint { - ($name: ident, $n_words: tt) => {} + ($name: ident, $n_words: tt) => {}; } #[cfg(feature = "quickcheck")] @@ -1602,12 +1606,12 @@ macro_rules! impl_quickcheck_arbitrary_for_uint { res.as_ref().into() } } - } + }; } #[cfg(not(feature = "quickcheck"))] #[macro_export] #[doc(hidden)] macro_rules! 
impl_quickcheck_arbitrary_for_uint { - ($uint: ty, $n_bytes: tt) => {} + ($uint: ty, $n_bytes: tt) => {}; } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 0dedc0723..ddeb747c3 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -6,11 +6,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use core::u64::MAX; -use core::str::FromStr; use core::convert::TryInto; -use uint::{FromDecStrErr, construct_uint, overflowing}; +use core::str::FromStr; +use core::u64::MAX; use crunchy::unroll; +use uint::{construct_uint, overflowing, FromDecStrErr}; construct_uint! { pub struct U256(4); @@ -77,22 +77,20 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from(&[0x10u8, 0x10][..])); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0x12u8, 0xf0][..])); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0, 0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from(&[0, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from(&[1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from(&[1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from(& - [ - 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, - 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, - 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0 + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256::from( + &[ + 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0 ][..] 
) ); assert_eq!( U256([0x00192437100019fa, 0x243710, 0, 0]), - U256::from(&[0x24u8, 0x37, 0x10,0, 0x19, 0x24, 0x37, 0x10, 0, 0x19, 0xfa][..]) + U256::from(&[0x24u8, 0x37, 0x10, 0, 0x19, 0x24, 0x37, 0x10, 0, 0x19, 0xfa][..]) ); // test initializtion from string @@ -101,10 +99,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -112,10 +110,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -123,10 +121,10 @@ fn 
uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -134,10 +132,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -145,10 +143,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), 
U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -156,10 +154,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -167,10 +165,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), 
U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); let sa = U256::from_str("0a").unwrap(); @@ -178,10 +176,10 @@ fn uint256_from() { assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0 , 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1 , 0, 0]), U256::from_str("0100000000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( - U256([0x12f0, 1 , 0x0910203040506077, 0x8090a0b0c0d0e0f0]), + U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); } @@ -191,7 +189,7 @@ fn uint256_try_into_primitives() { macro_rules! try_into_uint_primitive_ok { ($primitive: ty) => { assert_eq!(U256::from(10).try_into() as Result<$primitive, _>, Ok(<$primitive>::from(10u8))); - } + }; } try_into_uint_primitive_ok!(u8); try_into_uint_primitive_ok!(u16); @@ -203,7 +201,7 @@ fn uint256_try_into_primitives() { macro_rules! 
try_into_iint_primitive_ok { ($primitive: ty) => { assert_eq!(U256::from(10).try_into() as Result<$primitive, _>, Ok(<$primitive>::from(10i8))); - } + }; } try_into_iint_primitive_ok!(i8); try_into_iint_primitive_ok!(i16); @@ -218,7 +216,7 @@ fn uint256_try_into_primitives() { U256::from(<$small>::max_value() as $big + 1).try_into() as Result<$small, _>, Err(concat!("integer overflow when casting to ", stringify!($small))) ); - } + }; } try_into_primitive_err!(u8, u16); try_into_primitive_err!(u16, u32); @@ -283,7 +281,7 @@ fn uint256_bits_test() { } #[test] -#[cfg_attr(feature="dev", allow(eq_op))] +#[cfg_attr(feature = "dev", allow(eq_op))] fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -437,10 +435,7 @@ fn uint256_overflowing_pow() { U256::from(2).overflowing_pow(U256::from(0xff)), (U256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap(), false) ); - assert_eq!( - U256::from(2).overflowing_pow(U256::from(0x100)), - (U256::zero(), true) - ); + assert_eq!(U256::from(2).overflowing_pow(U256::from(0x100)), (U256::zero(), true)); } #[test] @@ -459,9 +454,9 @@ fn uint256_mul2() { #[test] fn uint256_overflowing_mul() { assert_eq!( - U256::from_str("100000000000000000000000000000000").unwrap().overflowing_mul( - U256::from_str("100000000000000000000000000000000").unwrap() - ), + U256::from_str("100000000000000000000000000000000") + .unwrap() + .overflowing_mul(U256::from_str("100000000000000000000000000000000").unwrap()), (U256::zero(), true) ); } @@ -479,8 +474,7 @@ fn uint512_mul() { #[test] fn uint256_mul_overflow() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - .overflowing_mul( + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap().overflowing_mul( U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() ), 
(U256::from_str("1").unwrap(), true) @@ -492,35 +486,28 @@ fn uint256_mul_overflow() { #[allow(unused_must_use)] fn uint256_mul_overflow_panic() { U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - * - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); + * U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); } #[test] fn uint256_sub_overflow() { assert_eq!( - U256::from_str("0").unwrap() - .overflowing_sub( - U256::from_str("1").unwrap() - ), + U256::from_str("0").unwrap().overflowing_sub(U256::from_str("1").unwrap()), (U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), true) - ); + ); } #[test] #[should_panic] #[allow(unused_must_use)] fn uint256_sub_overflow_panic() { - U256::from_str("0").unwrap() - - - U256::from_str("1").unwrap(); + U256::from_str("0").unwrap() - U256::from_str("1").unwrap(); } #[test] fn uint256_shl() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - << 4, + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() << 4, U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0").unwrap() ); } @@ -528,13 +515,11 @@ fn uint256_shl() { #[test] fn uint256_shl_words() { assert_eq!( - U256::from_str("0000000000000001ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - << 64, + U256::from_str("0000000000000001ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() << 64, U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000").unwrap() ); assert_eq!( - U256::from_str("0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - << 64, + U256::from_str("0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() << 64, 
U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000").unwrap() ); } @@ -543,17 +528,16 @@ fn uint256_shl_words() { fn uint256_mul() { assert_eq!( U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - * - U256::from_str("2").unwrap(), + * U256::from_str("2").unwrap(), U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe").unwrap() - ); + ); } #[test] fn uint256_div() { - assert_eq!(U256::from(10u64) / U256::from(1u64), U256::from(10u64)); - assert_eq!(U256::from(10u64) / U256::from(2u64), U256::from(5u64)); - assert_eq!(U256::from(10u64) / U256::from(3u64), U256::from(3u64)); + assert_eq!(U256::from(10u64) / U256::from(1u64), U256::from(10u64)); + assert_eq!(U256::from(10u64) / U256::from(2u64), U256::from(5u64)); + assert_eq!(U256::from(10u64) / U256::from(3u64), U256::from(3u64)); } #[test] @@ -566,7 +550,10 @@ fn uint256_rem() { fn uint256_from_dec_str() { assert_eq!(U256::from_dec_str("10").unwrap(), U256::from(10u64)); assert_eq!(U256::from_dec_str("1024").unwrap(), U256::from(1024u64)); - assert_eq!(U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), Err(FromDecStrErr::InvalidLength)); + assert_eq!( + U256::from_dec_str("115792089237316195423570985008687907853269984665640564039457584007913129639936"), + Err(FromDecStrErr::InvalidLength) + ); assert_eq!(U256::from_dec_str("0x11"), Err(FromDecStrErr::InvalidCharacter)); } @@ -601,16 +588,14 @@ fn u512_multi_adds() { let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 2, 1]).overflowing_add(U512([0, 0, 0, 0, 0, 0, 3, 1])); assert!(!overflow); - let (_, overflow) = U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX]) - .overflowing_add(U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX])); + let (_, overflow) = + U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX]).overflowing_add(U512([MAX, MAX, MAX, MAX, MAX, MAX, MAX, MAX])); assert!(overflow); - let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, 
MAX]) - .overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, MAX])); + let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]).overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, MAX])); assert!(overflow); - let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]) - .overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, 0])); + let (_, overflow) = U512([0, 0, 0, 0, 0, 0, 0, MAX]).overflowing_add(U512([0, 0, 0, 0, 0, 0, 0, 0])); assert!(!overflow); } @@ -626,8 +611,7 @@ fn u256_multi_adds() { assert_eq!(result, U256([0, 0, 5, 2])); assert!(!overflow); - let (_, overflow) = U256([MAX, MAX, MAX, MAX]) - .overflowing_add(U256([MAX, MAX, MAX, MAX])); + let (_, overflow) = U256([MAX, MAX, MAX, MAX]).overflowing_add(U256([MAX, MAX, MAX, MAX])); assert!(overflow); let (_, overflow) = U256([0, 0, 0, MAX]).overflowing_add(U256([0, 0, 0, MAX])); @@ -645,12 +629,10 @@ fn u256_multi_subs() { let (_, overflow) = U256([0, 0, 2, 1]).overflowing_sub(U256([0, 0, 3, 1])); assert!(overflow); - let (result, overflow) = - U256([MAX, MAX, MAX, MAX]) - .overflowing_sub(U256([MAX/2, MAX/2, MAX/2, MAX/2])); + let (result, overflow) = U256([MAX, MAX, MAX, MAX]).overflowing_sub(U256([MAX / 2, MAX / 2, MAX / 2, MAX / 2])); assert!(!overflow); - assert_eq!(U256([MAX/2+1, MAX/2+1, MAX/2+1, MAX/2+1]), result); + assert_eq!(U256([MAX / 2 + 1, MAX / 2 + 1, MAX / 2 + 1, MAX / 2 + 1]), result); let (result, overflow) = U256([0, 0, 0, 1]).overflowing_sub(U256([0, 0, 1, 0])); assert!(!overflow); @@ -679,61 +661,51 @@ fn u512_multi_subs() { #[test] fn u256_multi_carry_all() { let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([1, MAX-1, 0, 0]), result); + assert_eq!(U256([1, MAX - 1, 0, 0]), result); let (result, _) = U256([0, MAX, 0, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([0, 1, MAX-1, 0]), result); + assert_eq!(U256([0, 1, MAX - 1, 0]), result); let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([1, MAX, MAX-1, 0]), result); 
+ assert_eq!(U256([1, MAX, MAX - 1, 0]), result); let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U256([1, MAX, MAX-1, 0]), result); + assert_eq!(U256([1, MAX, MAX - 1, 0]), result); - let (result, _) = U256([MAX, MAX, 0, 0]) - .overflowing_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U256([1, 0, MAX-1, MAX]), result); + let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, MAX, 0, 0])); + assert_eq!(U256([1, 0, MAX - 1, MAX]), result); let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U256([1, MAX, MAX, MAX-1]), result); + assert_eq!(U256([1, MAX, MAX, MAX - 1]), result); let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, 0, 0, 0])); - assert_eq!(U256([1, MAX, MAX, MAX-1]), result); + assert_eq!(U256([1, MAX, MAX, MAX - 1]), result); - let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul( - U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, 0, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, MAX, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, 0, 0, 0])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, 0, 0, 0])); assert_eq!(U256([1, MAX, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, 0]) - .overflowing_mul(U256([MAX, MAX, 0, 0])); - assert_eq!(U256([1, 0, MAX, MAX-1]), result); + let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, MAX, 0, 0])); + assert_eq!(U256([1, 0, MAX, MAX - 1]), result); - let (result, _) = U256([MAX, MAX, 0, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U256([1, 0, MAX, MAX-1]), result); + let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, 0])); + assert_eq!(U256([1, 0, MAX, MAX - 1]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, MAX, 0, 0])); + let (result, _) = U256([MAX, MAX, MAX, 
MAX]).overflowing_mul(U256([MAX, MAX, 0, 0])); assert_eq!(U256([1, 0, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, 0, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, MAX, 0, 0]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, 0, MAX, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, 0])); - assert_eq!(U256([1, 0, 0, MAX-1]), result); + let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, MAX, MAX, 0])); + assert_eq!(U256([1, 0, 0, MAX - 1]), result); - let (result, _) = U256([MAX, MAX, MAX, 0]) - .overflowing_mul(U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, MAX, MAX, 0]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, 0, 0, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, MAX, MAX, 0])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, MAX, MAX, 0])); assert_eq!(U256([1, 0, 0, MAX]), result); let (result, _) = U256([0, 0, 0, MAX]).overflowing_mul(U256([0, 0, 0, MAX])); @@ -742,8 +714,7 @@ fn u256_multi_carry_all() { let (result, _) = U256([1, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, MAX])); assert_eq!(U256([0, 0, 0, MAX]), result); - let (result, _) = U256([MAX, MAX, MAX, MAX]) - .overflowing_mul(U256([MAX, MAX, MAX, MAX])); + let (result, _) = U256([MAX, MAX, MAX, MAX]).overflowing_mul(U256([MAX, MAX, MAX, MAX])); assert_eq!(U256([1, 0, 0, 0]), result); } @@ -812,7 +783,14 @@ fn u256_multi_muls_overflow() { #[test] fn u512_div() { - let fuzz_data = 
[0x38,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xff,0xff,0xff,0x7,0x0,0x0,0x0,0x0,0xc1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xfe,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; + let fuzz_data = [ + 0x38, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0x7, 0x0, 0x0, 0x0, 0x0, 0xc1, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + ]; let a = U512::from_little_endian(&fuzz_data[..64]); let b = U512::from_little_endian(&fuzz_data[64..]); let (x, y) = (a / b, a % b); @@ -829,39 +807,50 @@ fn big_endian() { source.to_big_endian(&mut target); assert_eq!( - vec![0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8], - target); + vec![ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8 + ], + target + ); let source = U256([512, 0, 0, 0]); let mut target = vec![0u8; 32]; source.to_big_endian(&mut target); assert_eq!( - vec![0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 
0u8, - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8], - target); + vec![ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8 + ], + target + ); let source = U256([0, 512, 0, 0]); let mut target = vec![0u8; 32]; source.to_big_endian(&mut target); assert_eq!( - vec![0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, - 0u8, 0u8, 0u8, 0u8, 0u8, 2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8], - target); + vec![ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 2u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8 + ], + target + ); let source = U256::from_str("0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20").unwrap(); source.to_big_endian(&mut target); assert_eq!( - vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, - 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20], - target); + vec![ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, + 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20 + ], + target + ); } #[test] fn u256_multi_muls2() { - let (result, _) = U256([0, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, 0])); assert_eq!(U256([0, 0, 0, 0]), result); @@ -917,7 +906,9 @@ fn u256_multi_muls2() { #[test] fn example() { let mut val: U256 = 1023.into(); - for _ in 0..200 { val = val * U256::from(2) } + for _ in 0..200 { + val = val * U256::from(2) + } assert_eq!(&format!("{}", val), "1643897619276947051879427220465009342380213662639797070513307648"); } @@ -925,14 +916,8 @@ fn example() { fn little_endian() { let number: U256 = "00022cca1da3f6e5722b7d3cc5bbfb486465ebc5a708dd293042f932d7eee119".into(); let expected = [ - 0x19, 0xe1, 0xee, 
0xd7, - 0x32, 0xf9, 0x42, 0x30, - 0x29, 0xdd, 0x08, 0xa7, - 0xc5, 0xeb, 0x65, 0x64, - 0x48, 0xfb, 0xbb, 0xc5, - 0x3c, 0x7d, 0x2b, 0x72, - 0xe5, 0xf6, 0xa3, 0x1d, - 0xca, 0x2c, 0x02, 0x00 + 0x19, 0xe1, 0xee, 0xd7, 0x32, 0xf9, 0x42, 0x30, 0x29, 0xdd, 0x08, 0xa7, 0xc5, 0xeb, 0x65, 0x64, 0x48, 0xfb, + 0xbb, 0xc5, 0x3c, 0x7d, 0x2b, 0x72, 0xe5, 0xf6, 0xa3, 0x1d, 0xca, 0x2c, 0x02, 0x00, ]; let mut result = [0u8; 32]; number.to_little_endian(&mut result); @@ -942,10 +927,8 @@ fn little_endian() { #[test] fn slice_roundtrip() { let raw = [ - 1u8, 2, 3, 5, 7, 11, 13, 17, - 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127 + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, + 107, 109, 113, 127, ]; let u256: U256 = (&raw[..]).into(); @@ -960,10 +943,8 @@ fn slice_roundtrip() { #[test] fn slice_roundtrip_le() { let raw = [ - 1u8, 2, 3, 5, 7, 11, 13, 17, - 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127 + 1u8, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, + 107, 109, 113, 127, ]; let u256 = U256::from_little_endian(&raw[..]); @@ -978,10 +959,8 @@ fn slice_roundtrip_le() { #[test] fn slice_roundtrip_le2() { let raw = [ - 2, 3, 5, 7, 11, 13, 17, - 19, 23, 29, 31, 37, 41, 43, 47, - 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127 + 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, + 109, 113, 127, ]; let u256 = U256::from_little_endian(&raw[..]); @@ -995,12 +974,8 @@ fn slice_roundtrip_le2() { #[test] fn from_little_endian() { - let source: [u8; 32] = [ - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - ]; + let source: [u8; 32] = + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0]; let number = U256::from_little_endian(&source[..]); @@ -1009,12 +984,8 @@ fn from_little_endian() { #[test] fn from_big_endian() { - let source: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]; + let source: [u8; 32] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; let number = U256::from_big_endian(&source[..]); @@ -1023,40 +994,27 @@ fn from_big_endian() { #[test] fn into_fixed_array() { - let expected: [u8; 32] = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]; - let ary : [u8; 32] = U256::from(1).into(); + let expected: [u8; 32] = + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; + let ary: [u8; 32] = U256::from(1).into(); assert_eq!(ary, expected); } #[test] fn test_u256_from_fixed_array() { - let ary = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 0, 0, 0, 0, 0, 123, - ]; - let num : U256 = ary.into(); - assert_eq!( num, U256::from(std::u64::MAX) + 1 + 123); + let ary = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 123]; + let num: U256 = ary.into(); + assert_eq!(num, U256::from(std::u64::MAX) + 1 + 123); - let a_ref : &U256 = &ary.into(); - assert_eq!( a_ref, &(U256::from(std::u64::MAX) + 1 + 123)); + let a_ref: &U256 = &ary.into(); + assert_eq!(a_ref, &(U256::from(std::u64::MAX) + 1 + 123)); } #[test] fn test_from_ref_to_fixed_array() { - let ary : &[u8; 32] = &[ - 1,0,1,2,1,0,1,2, - 3,0,3,4,3,0,3,4, - 5,0,5,6,5,0,5,6, - 7,0,7,8,7,0,7,8 - ]; - let big : U256 = ary.into(); + let ary: &[u8; 32] = + &[1, 0, 1, 2, 1, 0, 1, 2, 3, 0, 3, 4, 3, 0, 3, 4, 5, 0, 5, 6, 5, 0, 5, 6, 7, 0, 7, 8, 7, 0, 7, 8]; + let big: U256 = ary.into(); // the numbers are each row of 8 bytes reversed and cast to u64 assert_eq!(big, 
U256([504410889324070664, 360293493601469702, 216176097878868740, 72058702156267778u64])); } @@ -1064,20 +1022,14 @@ fn test_from_ref_to_fixed_array() { #[test] fn test_u512_from_fixed_array() { let ary = [ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 123 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, ]; - let num : U512 = ary.into(); - assert_eq!( num, U512::from(123) ); + let num: U512 = ary.into(); + assert_eq!(num, U512::from(123)); - let a_ref : &U512 = &ary.into(); - assert_eq!( a_ref, &U512::from(123) ); + let a_ref: &U512 = &ary.into(); + assert_eq!(a_ref, &U512::from(123)); } #[test] @@ -1096,7 +1048,7 @@ fn trailing_zeros() { assert_eq!(U256::from("0000000000000000000000000000000000000000000000000000000000000000").trailing_zeros(), 256); } -#[cfg(feature="quickcheck")] +#[cfg(feature = "quickcheck")] pub mod laws { use super::construct_uint; macro_rules! 
uint_laws { From 7eb170c27217d0410e4ba7ba8b952165cc33d0ea Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 13 Nov 2019 13:26:53 +0100 Subject: [PATCH 027/359] upgrade tiny-keccak to 2.0 (#260) * upgrade tiny-keccak to 2.0 * address Marek's feedback * [ethbloom] fix benches --- ethbloom/Cargo.toml | 2 +- ethbloom/benches/bloom.rs | 10 ++++++++- ethbloom/src/lib.rs | 10 +++++++-- keccak-hash/Cargo.toml | 2 +- keccak-hash/src/lib.rs | 41 ++++++++++++++++++++---------------- parity-crypto/Cargo.toml | 2 +- parity-crypto/src/lib.rs | 4 ++-- triehash/Cargo.toml | 2 +- triehash/benches/triehash.rs | 10 ++++++++- 9 files changed, 55 insertions(+), 28 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index b65c88813..56a092776 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -10,7 +10,7 @@ repository = "https://github.com/paritytech/parity-common" edition = "2018" [dependencies] -tiny-keccak = "1.5.0" +tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } diff --git a/ethbloom/benches/bloom.rs b/ethbloom/benches/bloom.rs index 07f11a92f..005cfd88f 100644 --- a/ethbloom/benches/bloom.rs +++ b/ethbloom/benches/bloom.rs @@ -1,7 +1,7 @@ use criterion::{criterion_group, criterion_main, Criterion}; use ethbloom::{Bloom, Input}; use hex_literal::hex; -use tiny_keccak::keccak256; +use tiny_keccak::{Hasher, Keccak}; fn test_bloom() -> Bloom { use std::str::FromStr; @@ -26,6 +26,14 @@ fn test_bloom() -> Bloom { .unwrap() } +fn keccak256(input: &[u8]) -> [u8; 32] { + let mut out = [0u8; 32]; + let mut keccak256 = Keccak::v256(); + keccak256.update(input); + keccak256.finalize(&mut out); + out +} + fn test_topic() -> Vec { 
hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").to_vec() } diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 61afa629e..d21f62022 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -52,7 +52,7 @@ use fixed_hash::*; use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; -use tiny_keccak::keccak256; +use tiny_keccak::{Hasher, Keccak}; // 3 according to yellowpaper const BLOOM_BITS: u32 = 3; @@ -87,7 +87,13 @@ enum Hash<'a> { impl<'a> From> for Hash<'a> { fn from(input: Input<'a>) -> Self { match input { - Input::Raw(raw) => Hash::Owned(keccak256(raw)), + Input::Raw(raw) => { + let mut out = [0u8; 32]; + let mut keccak256 = Keccak::v256(); + keccak256.update(raw); + keccak256.finalize(&mut out); + Hash::Owned(out) + }, Input::Hash(hash) => Hash::Ref(hash), } } diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index d90da7964..14b6b19ba 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -9,7 +9,7 @@ license = "GPL-3.0" edition = "2018" [dependencies] -tiny-keccak = "1.5.0" +tiny-keccak = { version = "2.0", features = ["keccak"] } primitive-types = { path = "../primitive-types", version = "0.6", default-features = false } [dev-dependencies] diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index 4f19937ae..e66650b39 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -16,12 +16,11 @@ #![cfg_attr(not(feature = "std"), no_std)] -use core::slice; #[cfg(feature = "std")] use std::io; pub use primitive_types::H256; -use tiny_keccak::Keccak; +use tiny_keccak::{Hasher, Keccak}; /// Get the KECCAK (i.e. Keccak) hash of the empty bytes string. 
pub const KECCAK_EMPTY: H256 = H256([ @@ -47,35 +46,41 @@ pub fn keccak>(s: T) -> H256 { H256(result) } -pub unsafe fn keccak_256_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) { - // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This - // means that we can reuse the input buffer for both input and output. - Keccak::keccak256(slice::from_raw_parts(input, inputlen), slice::from_raw_parts_mut(out, outlen)); +/// Computes in-place keccak256 hash of `data`. +pub fn keccak256(data: &mut [u8]) { + let mut keccak256 = Keccak::v256(); + keccak256.update(data.as_ref()); + keccak256.finalize(data); } -pub unsafe fn keccak_512_unchecked(out: *mut u8, outlen: usize, input: *const u8, inputlen: usize) { - // This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This - // means that we can reuse the input buffer for both input and output. - Keccak::keccak512(slice::from_raw_parts(input, inputlen), slice::from_raw_parts_mut(out, outlen)); +/// Computes in-place keccak512 hash of `data`. 
+pub fn keccak512(data: &mut [u8]) { + let mut keccak512 = Keccak::v512(); + keccak512.update(data.as_ref()); + keccak512.finalize(data); } -pub fn keccak_256(input: &[u8], mut output: &mut [u8]) { - Keccak::keccak256(input, &mut output); +pub fn keccak_256(input: &[u8], output: &mut [u8]) { + write_keccak(input, output); } -pub fn keccak_512(input: &[u8], mut output: &mut [u8]) { - Keccak::keccak512(input, &mut output); +pub fn keccak_512(input: &[u8], output: &mut [u8]) { + let mut keccak512 = Keccak::v512(); + keccak512.update(input); + keccak512.finalize(output); } pub fn write_keccak>(s: T, dest: &mut [u8]) { - Keccak::keccak256(s.as_ref(), dest); + let mut keccak256 = Keccak::v256(); + keccak256.update(s.as_ref()); + keccak256.finalize(dest); } #[cfg(feature = "std")] pub fn keccak_pipe(r: &mut dyn io::BufRead, w: &mut dyn io::Write) -> Result { let mut output = [0u8; 32]; let mut input = [0u8; 1024]; - let mut keccak = Keccak::new_keccak256(); + let mut keccak256 = Keccak::v256(); // read file loop { @@ -83,11 +88,11 @@ pub fn keccak_pipe(r: &mut dyn io::BufRead, w: &mut dyn io::Write) -> Result, { fn keccak256(&self) -> [u8; 32] { - let mut keccak = Keccak::new_keccak256(); + let mut keccak = Keccak::v256(); let mut result = [0u8; 32]; keccak.update(self.as_ref()); keccak.finalize(&mut result); diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 6342f7d03..701aae36b 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -15,7 +15,7 @@ rlp = { version = "0.4", path = "../rlp" } criterion = "0.3.0" keccak-hasher = "0.15.2" ethereum-types = { version = "0.8.0", path = "../ethereum-types" } -tiny-keccak = "1.5.0" +tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" hex-literal = "0.2.1" diff --git a/triehash/benches/triehash.rs b/triehash/benches/triehash.rs index fea116a46..684484265 100644 --- a/triehash/benches/triehash.rs +++ b/triehash/benches/triehash.rs @@ -17,10 +17,18 @@ use criterion::{criterion_group, 
criterion_main, Criterion}; use ethereum_types::H256; use keccak_hasher::KeccakHasher; -use tiny_keccak::keccak256; +use tiny_keccak::{Hasher, Keccak}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; use triehash::trie_root; +fn keccak256(input: &[u8]) -> [u8; 32] { + let mut keccak256 = Keccak::v256(); + let mut out = [0u8; 32]; + keccak256.update(input); + keccak256.finalize(&mut out); + out +} + fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { assert!(min_count + diff_count <= 32); *seed = H256(keccak256(seed.as_bytes())); From 53678a194e3313c71cb00252deaf2769fa35c9e7 Mon Sep 17 00:00:00 2001 From: Robert Vojta Date: Tue, 19 Nov 2019 17:48:29 +0100 Subject: [PATCH 028/359] Migrate primitive types to 2018 edition (#262) * Migrate primitive-types/impls/rlp to 2018 edition Signed-off-by: Robert Vojta * Migrate primitive-types/impls/codec to 2018 edition Signed-off-by: Robert Vojta * Migrate primitive-types to 2018 edition Signed-off-by: Robert Vojta --- primitive-types/Cargo.toml | 1 + primitive-types/impls/codec/Cargo.toml | 1 + primitive-types/impls/codec/src/lib.rs | 2 +- primitive-types/impls/rlp/Cargo.toml | 1 + primitive-types/impls/rlp/src/lib.rs | 4 ++-- primitive-types/src/lib.rs | 26 +++++--------------------- 6 files changed, 11 insertions(+), 24 deletions(-) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 46fa7317e..b289a67c1 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" +edition = "2018" [dependencies] fixed-hash = { version = "0.5", path = "../fixed-hash", default-features = false } diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index d4527aa48..12fda74f2 100644 --- 
a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" description = "Parity Codec serialization support for uint and fixed hash." +edition = "2018" [dependencies] parity-scale-codec = { version = "1.0.6", default-features = false } diff --git a/primitive-types/impls/codec/src/lib.rs b/primitive-types/impls/codec/src/lib.rs index ea478de98..9e5714ce0 100644 --- a/primitive-types/impls/codec/src/lib.rs +++ b/primitive-types/impls/codec/src/lib.rs @@ -11,7 +11,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[doc(hidden)] -pub extern crate parity_scale_codec as codec; +pub use parity_scale_codec as codec; /// Add Parity Codec serialization support to an integer created by `construct_uint!`. #[macro_export] diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index 6f6f469cd..62e957c85 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" description = "RLP serialization support for uint and fixed hash." +edition = "2018" [dependencies] rlp = { version = "0.4", path = "../../../rlp", default-features = false } diff --git a/primitive-types/impls/rlp/src/lib.rs b/primitive-types/impls/rlp/src/lib.rs index da82d0d79..16a711370 100644 --- a/primitive-types/impls/rlp/src/lib.rs +++ b/primitive-types/impls/rlp/src/lib.rs @@ -11,10 +11,10 @@ #![cfg_attr(not(feature = "std"), no_std)] #[doc(hidden)] -pub extern crate rlp; +pub use rlp; #[doc(hidden)] -pub extern crate core as core_; +pub use core as core_; /// Add RLP serialization support to an integer created by `construct_uint!`. 
#[macro_export] diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index ff7eb8210..5e3f77ec4 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -14,28 +14,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -extern crate core; - -#[macro_use] -extern crate uint; - -#[macro_use] -extern crate fixed_hash; - -#[cfg(feature = "impl-serde")] -#[macro_use] -extern crate impl_serde; - -#[cfg(feature = "impl-codec")] -#[macro_use] -extern crate impl_codec; - -#[cfg(feature = "impl-rlp")] -#[macro_use] -extern crate impl_rlp; - use core::convert::TryFrom; +use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; +use uint::{construct_uint, uint_full_mul_reg}; /// Error type for conversion. #[derive(Debug, PartialEq, Eq)] @@ -73,6 +54,7 @@ construct_fixed_hash! { #[cfg(feature = "impl-serde")] mod serde { use super::*; + use impl_serde::{impl_fixed_hash_serde, impl_uint_serde}; impl_uint_serde!(U128, 2); impl_uint_serde!(U256, 4); @@ -86,6 +68,7 @@ mod serde { #[cfg(feature = "impl-codec")] mod codec { use super::*; + use impl_codec::{impl_fixed_hash_codec, impl_uint_codec}; impl_uint_codec!(U128, 2); impl_uint_codec!(U256, 4); @@ -99,6 +82,7 @@ mod codec { #[cfg(feature = "impl-rlp")] mod rlp { use super::*; + use impl_rlp::{impl_fixed_hash_rlp, impl_uint_rlp}; impl_uint_rlp!(U128, 2); impl_uint_rlp!(U256, 4); From e055e3e2d8c76293aa1247442fb86458b589749c Mon Sep 17 00:00:00 2001 From: Robert Vojta Date: Wed, 20 Nov 2019 11:56:21 +0100 Subject: [PATCH 029/359] Make fixed-hash test structs public (#267) This change silents all dead_code warnings for #[cfg(test)]. Signed-off-by: Robert Vojta --- fixed-hash/src/tests.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index c587281b4..da14bed13 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -1,8 +1,8 @@ -construct_fixed_hash! 
{ struct H32(4); } -construct_fixed_hash! { struct H64(8); } -construct_fixed_hash! { struct H128(16); } -construct_fixed_hash! { struct H160(20); } -construct_fixed_hash! { struct H256(32); } +construct_fixed_hash! { pub struct H32(4); } +construct_fixed_hash! { pub struct H64(8); } +construct_fixed_hash! { pub struct H128(16); } +construct_fixed_hash! { pub struct H160(20); } +construct_fixed_hash! { pub struct H256(32); } impl_fixed_hash_conversions!(H256, H160); From 6779351000d6749baf2b935e66d1670866d89477 Mon Sep 17 00:00:00 2001 From: Jim Posen Date: Wed, 20 Nov 2019 15:09:38 +0100 Subject: [PATCH 030/359] Introduce Rlp::at_with_offset method. (#269) * Introduce Rlp::at_with_offset. * Method docs for Rlp::at and Rlp::at_with_offset. --- rlp/src/rlpin.rs | 21 +++++++++++++++++++-- rlp/tests/tests.rs | 29 +++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index dd2ee9826..5dd3730b6 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -186,9 +186,25 @@ impl<'a> Rlp<'a> { } } + /// Returns an Rlp item in a list at the given index. + /// + /// Returns an error if this Rlp is not a list or if the index is out of range. pub fn at<'view>(&'view self, index: usize) -> Result, DecoderError> where 'a: 'view, + { + let (rlp, _offset) = self.at_with_offset(index)?; + Ok(rlp) + } + + /// Returns an Rlp item in a list at the given index along with the byte offset into the + /// raw data slice. + /// + /// Returns an error if this Rlp is not a list or if the index is out of range. 
+ pub fn at_with_offset<'view>(&'view self, index: usize) + -> Result<(Rlp<'a>, usize), DecoderError> + where + 'a: 'view, { if !self.is_list() { return Err(DecoderError::RlpExpectedToBeList); @@ -211,11 +227,12 @@ impl<'a> Rlp<'a> { let (bytes, consumed) = Rlp::consume_items(bytes, indexes_to_skip)?; // update the cache - self.offset_cache.set(Some(OffsetCache::new(index, bytes_consumed + consumed))); + let offset = bytes_consumed + consumed; + self.offset_cache.set(Some(OffsetCache::new(index, offset))); // construct new rlp let found = BasicDecoder::payload_info(bytes)?; - Ok(Rlp::new(&bytes[0..found.header_len + found.value_len])) + Ok((Rlp::new(&bytes[0..found.header_len + found.value_len]), offset)) } pub fn is_null(&self) -> bool { diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 84a090e09..6f51b9bd3 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -53,6 +53,35 @@ fn rlp_at() { } } +#[test] +fn rlp_at_with_offset() { + let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']; + { + let rlp = Rlp::new(&data); + assert!(rlp.is_list()); + let animals: Vec = rlp.as_list().unwrap(); + assert_eq!(animals, vec!["cat".to_owned(), "dog".to_owned()]); + + let (cat, cat_offset) = rlp.at_with_offset(0).unwrap(); + assert!(cat.is_data()); + assert_eq!(cat_offset, 1); + assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']); + assert_eq!(cat.as_val::().unwrap(), "cat".to_owned()); + + let (dog, dog_offset) = rlp.at_with_offset(1).unwrap(); + assert!(dog.is_data()); + assert_eq!(dog_offset, 5); + assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']); + assert_eq!(dog.as_val::().unwrap(), "dog".to_owned()); + + let (cat_again, cat_offset) = rlp.at_with_offset(0).unwrap(); + assert!(cat_again.is_data()); + assert_eq!(cat_offset, 1); + assert_eq!(cat_again.as_raw(), &[0x83, b'c', b'a', b't']); + assert_eq!(cat_again.as_val::().unwrap(), "cat".to_owned()); + } +} + #[test] fn rlp_at_err() { let data = vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', 
b'o']; From 74b6e10a4823259ab60603e352ce9b211c8e7c1d Mon Sep 17 00:00:00 2001 From: Jim Posen Date: Wed, 20 Nov 2019 16:12:51 +0100 Subject: [PATCH 031/359] Bump rlp crate version. (#270) --- rlp/CHANGELOG.md | 4 ++++ rlp/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 10d37b23d..e0a32ca9b 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.4] - 2019-11-20 +### Added +- Method `Rlp::at_with_offset` (https://github.com/paritytech/parity-common/pull/269) + ## [0.4.3] - 2019-10-24 ### Dependencies - Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 0fd6e9b5b..ea0da9b10 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.4.3" +version = "0.4.4" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT/Apache-2.0" From a2987e8a76335f7e038d7ad6279099601a5a0749 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 22 Nov 2019 12:48:25 +0100 Subject: [PATCH 032/359] kvdb-rocksdb: configurable memory budget per column (#256) * kvdb-rocksdb: configurable memory budget per column * kvdb-rocksdb: export keep_log_file_num * kvdb-rocksdb: less invasive changes * kvdb-rocksdb: small cleanup * kvdb-rocksdb: make memory_budget_per_col private * kvdb-rocksdb: make column_config a method on config * kvdb-rocksdb: other minor improvements * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: David * kvdb-rocksdb: configurable memory budget per column * kvdb-rocksdb: export keep_log_file_num * kvdb-rocksdb: less invasive changes * kvdb-rocksdb: small cleanup * kvdb-rocksdb: make memory_budget_per_col private * kvdb-rocksdb: make column_config a method on config * kvdb-rocksdb: other minor improvements * 
kvdb-rocksdb: remove write_limiter The API for setting the write limiter is not available upstream and we only want to set it for an HDD, which is a poor target for parity-ethereum-like workload anyway. * kvdb-rocksdb: document CompactionProfile better --- kvdb-rocksdb/src/lib.rs | 177 +++++++++++++++++++++++----------------- 1 file changed, 102 insertions(+), 75 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index c4a9c0b15..3cdf19e75 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify @@ -44,9 +44,17 @@ where io::Error::new(io::ErrorKind::Other, e) } +// Used for memory budget. +type MiB = usize; + const KB: usize = 1024; const MB: usize = 1024 * KB; -const DB_DEFAULT_MEMORY_BUDGET_MB: usize = 128; + +/// The default column memory budget in MiB. +pub const DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB: MiB = 128; + +/// The default memory budget in MiB. +pub const DB_DEFAULT_MEMORY_BUDGET_MB: MiB = 512; enum KeyState { Insert(DBValue), @@ -54,14 +62,17 @@ enum KeyState { } /// Compaction profile for the database settings +/// Note, that changing these parameters may trigger +/// the compaction process of RocksDB on startup. +/// https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#level_compaction_dynamic_level_bytes-is-true #[derive(Clone, Copy, PartialEq, Debug)] pub struct CompactionProfile { /// L0-L1 target file size + /// The mimimum size should be calculated in accordance with the + /// number of levels and the expected size of the database. 
pub initial_file_size: u64, /// block size pub block_size: usize, - /// rate limiter for background flushes and compactions, bytes/sec, if any - pub write_rate_limit: Option, } impl Default for CompactionProfile { @@ -101,9 +112,10 @@ impl CompactionProfile { let hdd_check_file = db_path .to_str() .and_then(|path_str| Command::new("df").arg(path_str).output().ok()) - .and_then(|df_res| match df_res.status.success() { - true => Some(df_res.stdout), - false => None, + .and_then(|df_res| if df_res.status.success() { + Some(df_res.stdout) + } else { + None }) .and_then(rotational_from_df_output); // Read out the file and match compaction profile. @@ -134,7 +146,7 @@ impl CompactionProfile { /// Default profile suitable for SSD storage pub fn ssd() -> CompactionProfile { - CompactionProfile { initial_file_size: 64 * MB as u64, block_size: 16 * KB, write_rate_limit: None } + CompactionProfile { initial_file_size: 64 * MB as u64, block_size: 8 * MB } } /// Slow HDD compaction profile @@ -142,7 +154,6 @@ impl CompactionProfile { CompactionProfile { initial_file_size: 256 * MB as u64, block_size: 64 * KB, - write_rate_limit: Some(16 * MB as u64), } } } @@ -152,29 +163,62 @@ impl CompactionProfile { pub struct DatabaseConfig { /// Max number of open files. pub max_open_files: i32, - /// Memory budget (in MiB) used for setting block cache size, write buffer size. - pub memory_budget: Option, - /// Compaction profile + /// Memory budget (in MiB) used for setting block cache size and + /// write buffer size for each column including the default one. + /// If the memory budget of a column is not specified, + /// `DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB` is used for that column. + pub memory_budget: HashMap, MiB>, + /// Compaction profile. pub compaction: CompactionProfile, - /// Set number of columns + /// Set number of columns. pub columns: Option, + /// Specify the maximum number of info/debug log files to be kept. 
+ pub keep_log_file_num: i32, } impl DatabaseConfig { /// Create new `DatabaseConfig` with default parameters and specified set of columns. /// Note that cache sizes must be explicitly set. pub fn with_columns(columns: Option) -> Self { - let mut config = Self::default(); - config.columns = columns; - config + Self { columns, ..Default::default() } + } + + /// Returns the total memory budget in bytes. + pub fn memory_budget(&self) -> MiB { + if self.memory_budget.is_empty() && self.columns.is_none() { + return DB_DEFAULT_MEMORY_BUDGET_MB * MB; + } + (0..=self.columns.unwrap_or(0)) + .map(|i| self.memory_budget.get(&i.checked_sub(1)).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB) + .sum() } - pub fn memory_budget(&self) -> usize { - self.memory_budget.unwrap_or(DB_DEFAULT_MEMORY_BUDGET_MB) * MB + /// Returns the memory budget of the specified column in bytes. + fn memory_budget_per_col(&self, col: Option) -> MiB { + self.memory_budget.get(&col).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB } - pub fn memory_budget_per_col(&self) -> usize { - self.memory_budget() / self.columns.unwrap_or(1) as usize + // Get column family configuration with the given block based options. 
+ fn column_config(&self, block_opts: &BlockBasedOptions, col: Option) -> io::Result { + let memory_budget_per_col = self.memory_budget_per_col(col); + let mut opts = Options::new(); + + opts.set_parsed_options("level_compaction_dynamic_level_bytes=true").map_err(other_io_err)?; + + opts.set_block_based_table_factory(block_opts); + + opts.set_parsed_options(&format!( + "block_based_table_factory={{{};{}}}", + "cache_index_and_filter_blocks=true", "pin_l0_filter_and_index_blocks_in_cache=true" + )) + .map_err(other_io_err)?; + + opts.optimize_level_style_compaction(memory_budget_per_col as i32); + opts.set_target_file_size_base(self.compaction.initial_file_size); + + opts.set_parsed_options("compression_per_level=").map_err(other_io_err)?; + + Ok(opts) } } @@ -182,9 +226,10 @@ impl Default for DatabaseConfig { fn default() -> DatabaseConfig { DatabaseConfig { max_open_files: 512, - memory_budget: None, + memory_budget: HashMap::new(), compaction: CompactionProfile::default(), columns: None, + keep_log_file_num: 1, } } } @@ -192,7 +237,6 @@ impl Default for DatabaseConfig { /// Database iterator (for flushed data only) // The compromise of holding only a virtual borrow vs. holding a lock on the // inner DB (to prevent closing via restoration) may be re-evaluated in the future. -// pub struct DatabaseIterator<'a> { iter: InterleaveOrdered<::std::vec::IntoIter<(Box<[u8]>, Box<[u8]>)>, DBIterator>, _marker: PhantomData<&'a Database>, @@ -211,28 +255,6 @@ struct DBAndColumns { cfs: Vec, } -// get column family configuration from database config. 
-fn col_config(config: &DatabaseConfig, block_opts: &BlockBasedOptions) -> io::Result { - let mut opts = Options::new(); - - opts.set_parsed_options("level_compaction_dynamic_level_bytes=true").map_err(other_io_err)?; - - opts.set_block_based_table_factory(block_opts); - - opts.set_parsed_options(&format!( - "block_based_table_factory={{{};{}}}", - "cache_index_and_filter_blocks=true", "pin_l0_filter_and_index_blocks_in_cache=true" - )) - .map_err(other_io_err)?; - - opts.optimize_level_style_compaction(config.memory_budget_per_col() as i32); - opts.set_target_file_size_base(config.compaction.initial_file_size); - - opts.set_parsed_options("compression_per_level=").map_err(other_io_err)?; - - Ok(opts) -} - /// Key-Value database. pub struct Database { db: RwLock>, @@ -278,16 +300,24 @@ impl Database { pub fn open(config: &DatabaseConfig, path: &str) -> io::Result { let mut opts = Options::new(); - if let Some(rate_limit) = config.compaction.write_rate_limit { - opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit)).map_err(other_io_err)?; - } opts.set_use_fsync(false); opts.create_if_missing(true); opts.set_max_open_files(config.max_open_files); - opts.set_parsed_options("keep_log_file_num=1").map_err(other_io_err)?; + opts.set_parsed_options(&format!("keep_log_file_num={}", config.keep_log_file_num)).map_err(other_io_err)?; opts.set_parsed_options("bytes_per_sync=1048576").map_err(other_io_err)?; - opts.set_db_write_buffer_size(config.memory_budget_per_col() / 2); - opts.increase_parallelism(cmp::max(1, ::num_cpus::get() as i32 / 2)); + + let columns = config.columns.unwrap_or(0); + + if columns == 0 { + let budget = config.memory_budget() / 2; + opts.set_db_write_buffer_size(budget); + // from https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#memtable + // Memtable size is controlled by the option `write_buffer_size`. + // If you increase your memtable size, be sure to also increase your L1 size! 
+ // L1 size is controlled by the option `max_bytes_for_level_base`. + opts.set_parsed_options(&format!("max_bytes_for_level_base={}", budget)).map_err(other_io_err)?; + } + opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); let mut block_opts = BlockBasedOptions::new(); @@ -308,14 +338,12 @@ impl Database { fs::remove_file(db_corrupted)?; } - let columns = config.columns.unwrap_or(0) as usize; - - let mut cf_options = Vec::with_capacity(columns); + let mut cf_options = Vec::with_capacity(columns as usize); let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect(); - for _ in 0..config.columns.unwrap_or(0) { - cf_options.push(col_config(&config, &block_opts)?); + for i in 0..columns { + cf_options.push(config.column_config(&block_opts, Some(i))?); } let write_opts = WriteOptions::new(); @@ -359,31 +387,30 @@ impl Database { warn!("DB corrupted: {}, attempting repair", s); DB::repair(&opts, path).map_err(other_io_err)?; - match cfnames.is_empty() { - true => DB::open(&opts, path).map_err(other_io_err)?, - false => { - let db = DB::open_cf(&opts, path, &cfnames, &cf_options).map_err(other_io_err)?; - cfs = cfnames - .iter() - .map(|n| db.cf_handle(n).expect("rocksdb opens a cf_handle for each cfname; qed")) - .collect(); - db - } + if cfnames.is_empty() { + DB::open(&opts, path).map_err(other_io_err)? 
+ } else { + let db = DB::open_cf(&opts, path, &cfnames, &cf_options).map_err(other_io_err)?; + cfs = cfnames + .iter() + .map(|n| db.cf_handle(n).expect("rocksdb opens a cf_handle for each cfname; qed")) + .collect(); + db } } Err(s) => return Err(other_io_err(s)), }; let num_cols = cfs.len(); Ok(Database { - db: RwLock::new(Some(DBAndColumns { db: db, cfs: cfs })), + db: RwLock::new(Some(DBAndColumns { db, cfs })), config: config.clone(), - write_opts: write_opts, + write_opts, overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), flushing_lock: Mutex::new(false), path: path.to_owned(), - read_opts: read_opts, - block_opts: block_opts, + read_opts, + block_opts, }) } @@ -654,9 +681,8 @@ impl Database { pub fn drop_column(&self) -> io::Result<()> { match *self.db.write() { Some(DBAndColumns { ref mut db, ref mut cfs }) => { - if let Some(col) = cfs.pop() { + if let Some(_col) = cfs.pop() { let name = format!("col{}", cfs.len()); - drop(col); db.drop_cf(&name).map_err(other_io_err)?; } Ok(()) @@ -669,9 +695,10 @@ impl Database { pub fn add_column(&self) -> io::Result<()> { match *self.db.write() { Some(DBAndColumns { ref mut db, ref mut cfs }) => { - let col = cfs.len() as u32; + let col = cfs.len(); let name = format!("col{}", col); - cfs.push(db.create_cf(&name, &col_config(&self.config, &self.block_opts)?).map_err(other_io_err)?); + let col_config = self.config.column_config(&self.block_opts, Some(col as u32))?; + cfs.push(db.create_cf(&name, &col_config).map_err(other_io_err)?); Ok(()) } None => Ok(()), @@ -823,9 +850,9 @@ mod tests { let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); assert_eq!(db.num_columns(), 0); - for i in 0..5 { + for i in 1..=5 { db.add_column().unwrap(); - assert_eq!(db.num_columns(), i + 1); + assert_eq!(db.num_columns(), i); } } From a0da00e44f1f56fa23c20a2ba173ccdbdb6b40da Mon Sep 17 00:00:00 2001 From: 
Andronik Ordian Date: Mon, 25 Nov 2019 16:07:38 +0100 Subject: [PATCH 033/359] kvdb-web: async-awaitify (#259) * kvdb-web: async-awaitify * kvdb-web: cargo fmt * Update kvdb-web/Cargo.toml --- kvdb-web/Cargo.toml | 13 ++--- kvdb-web/src/lib.rs | 107 +++++++++++------------------------ kvdb-web/tests/indexed_db.rs | 52 +++++++---------- 3 files changed, 62 insertions(+), 110 deletions(-) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index aaf237da8..c9157d922 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -9,16 +9,16 @@ license = "GPL-3.0" edition = "2018" [dependencies] -wasm-bindgen = "0.2.51" -js-sys = "0.3.28" +wasm-bindgen = "0.2.54" +js-sys = "0.3.31" kvdb = { version = "0.1", path = "../kvdb" } kvdb-memorydb = { version = "0.1", path = "../kvdb-memorydb" } -futures-preview = "0.3.0-alpha.19" +futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" [dependencies.web-sys] -version = "0.3.28" +version = "0.3.31" features = [ 'console', 'Window', @@ -37,7 +37,6 @@ features = [ ] [dev-dependencies] -wasm-bindgen-test = "0.2.49" -futures-preview = { version = "0.3.0-alpha.19", features = ['compat'] } -futures01 = { package = "futures", version = "0.1" } +wasm-bindgen-test = "0.3.4" console_log = "0.1.2" +wasm-bindgen-futures = "0.4.4" diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index bc9687ed4..946057830 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -28,8 +28,6 @@ use kvdb::{DBTransaction, DBValue}; use kvdb_memorydb::{self as in_memory, InMemory}; use send_wrapper::SendWrapper; use std::io; -use std::rc::Rc; -use std::sync::Mutex; pub use error::Error; pub use kvdb::KeyValueDB; @@ -44,7 +42,7 @@ pub struct Database { version: u32, columns: u32, in_memory: InMemory, - indexed_db: Mutex>, + indexed_db: SendWrapper, } // The default column is represented as `None`. 
@@ -57,71 +55,38 @@ fn number_to_column(col: u32) -> Column { impl Database { /// Opens the database with the given name, /// and the specified number of columns (not including the default one). - pub fn open(name: String, columns: u32) -> impl Future> { - // let's try to open the latest version of the db first - let open_request = indexed_db::open(name.as_str(), None, columns); + pub async fn open(name: String, columns: u32) -> Result { let name_clone = name.clone(); - open_request - .then(move |db| { - let db = match db { - Ok(db) => db, - Err(err) => return future::Either::Right(future::err(err)), - }; - - // If we need more column than the latest version has, - // then bump the version (+ 1 for the default column). - // In order to bump the version, we close the database - // and reopen it with a higher version than it was opened with previously. - // cf. https://github.com/paritytech/parity-common/pull/202#discussion_r321221751 - if columns + 1 > db.columns { - let next_version = db.version + 1; - drop(db); - future::Either::Left(indexed_db::open(name.as_str(), Some(next_version), columns).boxed()) - } else { - future::Either::Left(future::ok(db).boxed()) - } - // populate the in_memory db from the IndexedDB - }) - .then(move |db| { - let db = match db { - Ok(db) => db, - Err(err) => return future::Either::Right(future::err(err)), - }; - - let indexed_db::IndexedDB { version, inner, .. 
} = db; - let rc = Rc::new(inner.take()); - let weak = Rc::downgrade(&rc); - // read the columns from the IndexedDB - future::Either::Left( - stream::iter(0..=columns) - .map(move |n| { - let db = weak.upgrade().expect("rc should live at least as long; qed"); - indexed_db::idb_cursor(&db, n).fold(DBTransaction::new(), move |mut txn, (key, value)| { - let column = number_to_column(n); - txn.put_vec(column, key.as_ref(), value); - future::ready(txn) - }) - // write each column into memory - }) - .fold(in_memory::create(columns), |m, txn| { - txn.then(|txn| { - m.write_buffered(txn); - future::ready(m) - }) - }) - .then(move |in_memory| { - future::ok(Database { - name: name_clone, - version, - columns, - in_memory, - indexed_db: Mutex::new(SendWrapper::new( - Rc::try_unwrap(rc).expect("should have only 1 ref at this point; qed"), - )), - }) - }), - ) - }) + // let's try to open the latest version of the db first + let db = indexed_db::open(name.as_str(), None, columns).await?; + + // If we need more column than the latest version has, + // then bump the version (+ 1 for the default column). + // In order to bump the version, we close the database + // and reopen it with a higher version than it was opened with previously. + // cf. https://github.com/paritytech/parity-common/pull/202#discussion_r321221751 + let db = if columns + 1 > db.columns { + let next_version = db.version + 1; + drop(db); + indexed_db::open(name.as_str(), Some(next_version), columns).await? + } else { + db + }; + // populate the in_memory db from the IndexedDB + let indexed_db::IndexedDB { version, inner, .. 
} = db; + let in_memory = in_memory::create(columns); + // read the columns from the IndexedDB + for n in 0..=columns { + let column = number_to_column(n); + let mut txn = DBTransaction::new(); + let mut stream = indexed_db::idb_cursor(&*inner, n); + while let Some((key, value)) = stream.next().await { + txn.put_vec(column, key.as_ref(), value); + } + // write each column into memory + in_memory.write_buffered(txn); + } + Ok(Database { name: name_clone, version, columns, in_memory, indexed_db: inner }) } /// Get the database name. @@ -137,9 +102,7 @@ impl Database { impl Drop for Database { fn drop(&mut self) { - if let Ok(db) = self.indexed_db.lock() { - db.close(); - } + self.indexed_db.close(); } } @@ -153,9 +116,7 @@ impl KeyValueDB for Database { } fn write_buffered(&self, transaction: DBTransaction) { - if let Ok(guard) = self.indexed_db.lock() { - let _ = indexed_db::idb_commit_transaction(&*guard, &transaction, self.columns); - } + let _ = indexed_db::idb_commit_transaction(&*self.indexed_db, &transaction, self.columns); self.in_memory.write_buffered(transaction); } diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index 0824a5760..2a9ddc14e 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -16,52 +16,44 @@ //! IndexedDB tests. 
-use futures::compat; -use futures::future::{self, FutureExt as _, TryFutureExt as _}; +use futures::future::TryFutureExt as _; use kvdb_web::{Database, KeyValueDB as _}; -use wasm_bindgen::JsValue; use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); -#[wasm_bindgen_test(async)] -fn reopen_the_database_with_more_columns() -> impl futures01::Future { +#[wasm_bindgen_test] +async fn reopen_the_database_with_more_columns() { let _ = console_log::init_with_level(log::Level::Trace); - fn open_db(col: u32) -> impl future::Future { - Database::open("MyAsyncTest".into(), col).unwrap_or_else(|err| panic!("{}", err)) + async fn open_db(col: u32) -> Database { + Database::open("MyAsyncTest".into(), col).unwrap_or_else(|err| panic!("{}", err)).await } - let fut = open_db(1) - .then(|db| { - // Write a value into the database - let mut batch = db.transaction(); - batch.put(None, b"hello", b"world"); - db.write_buffered(batch); + let db = open_db(1).await; - assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + // Write a value into the database + let mut batch = db.transaction(); + batch.put(None, b"hello", b"world"); + db.write_buffered(batch); - // Check the database version - assert_eq!(db.version(), 1); + assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); - // Close the database - drop(db); + // Check the database version + assert_eq!(db.version(), 1); - // Reopen it again with 3 columns - open_db(3) - }) - .map(|db| { - // The value should still be present - assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); - assert!(db.get(None, b"trash").unwrap().is_none()); + // Close the database + drop(db); - // The version should be bumped - assert_eq!(db.version(), 2); + // Reopen it again with 3 columns + let db = open_db(3).await; - Ok(()) - }); + // The value should still be present + assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert!(db.get(None, 
b"trash").unwrap().is_none()); - compat::Compat::new(fut) + // The version should be bumped + assert_eq!(db.version(), 2); } From 83e79c25e3d18885354ab9b5bcb9442fdce22585 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 26 Nov 2019 09:12:21 +0100 Subject: [PATCH 034/359] [fixed-hash]: re-export `alloc_` (#268) alloc is required by the `rustc-hex` implementation which make the `std` build to fail. This commit re-exports `alloc` for both std and no-std to fix this problem --- fixed-hash/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 297490134..bfc210a84 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -10,7 +10,6 @@ // Re-export liballoc using an alias so that the macros can work without // requiring `extern crate alloc` downstream. -#[cfg(not(feature = "std"))] #[doc(hidden)] pub extern crate alloc as alloc_; From 92ec1826884281820ebbbfc428e6c0fbdcccb9bf Mon Sep 17 00:00:00 2001 From: Robert Vojta Date: Tue, 26 Nov 2019 09:15:11 +0100 Subject: [PATCH 035/359] Use 2018 edition for rustfmt (#266) * Use 2018 edition for rustfmt Signed-off-by: Robert Vojta * Run rustfmt check in the script stage after_script exit codes are ignored and do not affect build status. 
Signed-off-by: Robert Vojta * Fix ethbloom formatting Signed-off-by: Robert Vojta * Move cargo fmt from matrix to script Signed-off-by: Robert Vojta --- .travis.yml | 5 +++-- ethbloom/src/lib.rs | 2 +- rustfmt.toml | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2d48dc81b..c0471b8df 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,8 +8,6 @@ matrix: rust: stable before_script: - rustup component add rustfmt - after_script: - - cargo fmt -- --check - os: linux rust: beta - os: linux @@ -22,6 +20,9 @@ matrix: install: - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh script: + - if [ "$TRAVIS_RUST_VERSION" == "stable" ] && [ "$TRAVIS_OS_NAME" == "linux" ]; then + cargo fmt -- --check; + fi - cargo check --all --tests - cargo build --all - cargo test --all --exclude uint --exclude fixed-hash diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index d21f62022..9ef11e3ee 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -93,7 +93,7 @@ impl<'a> From> for Hash<'a> { keccak256.update(raw); keccak256.finalize(&mut out); Hash::Owned(out) - }, + } Input::Hash(hash) => Hash::Ref(hash), } } diff --git a/rustfmt.toml b/rustfmt.toml index cba0d885c..c699603f5 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,3 +1,4 @@ hard_tabs = true max_width = 120 use_small_heuristics = "Max" +edition = "2018" From 66f84c911c531ab743e012172489df1d682fad76 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 27 Nov 2019 16:12:11 +0100 Subject: [PATCH 036/359] travis: try to fix wasmpack chrome test on macOS (#263) * travis: try to fix wasmpack chrome test on macOS * travis: debug chromedriver version * travis: try with addons * travis: try latest LTS npm * travis: remove trailing semicolon * cargo fmt --- .travis.yml | 17 +++++++++++++---- kvdb-rocksdb/src/lib.rs | 11 ++--------- rlp/src/rlpin.rs | 7 +++---- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/.travis.yml b/.travis.yml index 
c0471b8df..ff03ac379 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,17 @@ matrix: - os: linux rust: nightly - os: osx - osx_image: xcode11 + osx_image: xcode11.2 + addons: + chrome: stable + firefox: latest + install: + - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.1/install.sh | sh + - source ~/.nvm/nvm.sh + - nvm install --lts + - npm install -g chromedriver + - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + - which chromedriver rust: stable allow_failures: - rust: nightly @@ -41,6 +51,5 @@ script: - cd parity-util-mem/ && cargo test --no-default-features --features=dlmalloc-global && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. - - if [ "$TRAVIS_OS_NAME" == "linux" ]; then - cd kvdb-web/ && wasm-pack test --headless --chrome --firefox && cd ..; - fi + - cd kvdb-web/ && wasm-pack test --headless --chrome --firefox && cd .. + diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 3cdf19e75..75d2eb615 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -112,11 +112,7 @@ impl CompactionProfile { let hdd_check_file = db_path .to_str() .and_then(|path_str| Command::new("df").arg(path_str).output().ok()) - .and_then(|df_res| if df_res.status.success() { - Some(df_res.stdout) - } else { - None - }) + .and_then(|df_res| if df_res.status.success() { Some(df_res.stdout) } else { None }) .and_then(rotational_from_df_output); // Read out the file and match compaction profile. 
if let Some(hdd_check) = hdd_check_file { @@ -151,10 +147,7 @@ impl CompactionProfile { /// Slow HDD compaction profile pub fn hdd() -> CompactionProfile { - CompactionProfile { - initial_file_size: 256 * MB as u64, - block_size: 64 * KB, - } + CompactionProfile { initial_file_size: 256 * MB as u64, block_size: 64 * KB } } } diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 5dd3730b6..f1c488626 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -201,10 +201,9 @@ impl<'a> Rlp<'a> { /// raw data slice. /// /// Returns an error if this Rlp is not a list or if the index is out of range. - pub fn at_with_offset<'view>(&'view self, index: usize) - -> Result<(Rlp<'a>, usize), DecoderError> - where - 'a: 'view, + pub fn at_with_offset<'view>(&'view self, index: usize) -> Result<(Rlp<'a>, usize), DecoderError> + where + 'a: 'view, { if !self.is_list() { return Err(DecoderError::RlpExpectedToBeList); From 8fb8f13c8084ba8770dcfadca71579eeaac05685 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 28 Nov 2019 14:21:18 +0100 Subject: [PATCH 037/359] [kvdb-rocksdb] switch to upstream (#257) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Switch from `parity-rocksdb` to upstream `rust-rocksdb` * wip * wip * kvdb-rocksdb: working iterator * kvdb-rocksdb: cleanup * kvdb-rocksdb: more cleanup * kvdb-rocksdb: use options from updated upstream * kvdb-rocksdb: set bloom filter as recommended by tuning guide * kvdb-rocksdb: fix build * kvdb-rocksdb: set_level_compaction_dynamic_level_bytes * kvdb-rocksdb: switch to just published version * kvdb-rocksdb: preserve the old compression_per_level setting * kvdb-rocksdb: add some iter module docs * kvdb-rocksdb: remove path on kvdb dependency temporarily * kvdb-rocksdb: use only lz4 and snappy features * kvdb-rocksdb: support zstd compression as well * Also add `kvdb` as path dependency * Apply suggestions from code review Co-Authored-By: Bastian Köcher * kvdb-rocksdb: fix build * 
kvdb-rocksdb: use open_cf_descriptors * kvdb-rocksdb: remove redundant .into() * Disable `zstd` again * kvdb-rocksdb: set block_size to 64 KB again * kvdb-rocksdb: cargo fmt * kvdb-rocksdb: set back block_size to 16 KB * moar cargo fmt * Add tests for budget calculation * Add test to check the rocksdb settings * kvdb-rocksdb: do not account for default column memory budget * kvdb-rocksdb: please the CI * kvdb-rocksdb: remove lz4 feature as it has no effect for now --- kvdb-rocksdb/Cargo.toml | 5 +- kvdb-rocksdb/src/iter.rs | 128 ++++++++++ kvdb-rocksdb/src/lib.rs | 538 ++++++++++++++++++++++++--------------- 3 files changed, 465 insertions(+), 206 deletions(-) create mode 100644 kvdb-rocksdb/src/iter.rs diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index cda3cc24b..bbf2e42cb 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -11,12 +11,13 @@ edition = "2018" elastic-array = "0.10.2" fs-swap = "0.2.4" interleaved-ordered = "0.1.1" -kvdb = { version = "0.1", path = "../kvdb" } +kvdb = { path = "../kvdb", version = "0.1" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.9.0" regex = "1.3.1" -parity-rocksdb = "0.5.1" +rocksdb = { version = "0.13", features = ["snappy"], default-features = false } +owning_ref = "0.4.0" [dev-dependencies] tempdir = "0.3.7" diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs new file mode 100644 index 000000000..52934e1a8 --- /dev/null +++ b/kvdb-rocksdb/src/iter.rs @@ -0,0 +1,128 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! This module contains an implementation of a RocksDB iterator +//! wrapped inside a `RwLock`. Since `RwLock` "owns" the inner data, +//! we're using `owning_ref` to work around the borrowing rules of Rust. + +use crate::DBAndColumns; +use owning_ref::{OwningHandle, StableAddress}; +use parking_lot::RwLockReadGuard; +use rocksdb::{DBIterator, IteratorMode}; +use std::ops::{Deref, DerefMut}; + +/// A tuple holding key and value data, used as the iterator item type. +pub type KeyValuePair = (Box<[u8]>, Box<[u8]>); + +/// Iterator with built-in synchronization. +pub struct ReadGuardedIterator<'a, I, T> { + inner: OwningHandle>, DerefWrapper>>, +} + +// We can't implement `StableAddress` for a `RwLockReadGuard` +// directly due to orphan rules. +#[repr(transparent)] +struct UnsafeStableAddress<'a, T>(RwLockReadGuard<'a, T>); + +impl<'a, T> Deref for UnsafeStableAddress<'a, T> { + type Target = T; + fn deref(&self) -> &Self::Target { + self.0.deref() + } +} + +// RwLockReadGuard dereferences to a stable address; qed +unsafe impl<'a, T> StableAddress for UnsafeStableAddress<'a, T> {} + +struct DerefWrapper(T); + +impl Deref for DerefWrapper { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for DerefWrapper { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a, I: Iterator, T> Iterator for ReadGuardedIterator<'a, I, T> { + type Item = I::Item; + + fn next(&mut self) -> Option { + self.inner.deref_mut().as_mut().and_then(|iter| iter.next()) + } +} + +/// Instantiate iterators yielding `KeyValuePair`s. 
+pub trait IterationHandler { + type Iterator: Iterator; + + /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number + /// is passed. + fn iter(&self, col: Option) -> Self::Iterator; + /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number + /// is passed. The iterator starts from the first key having the provided `prefix`. + fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Self::Iterator; +} + +impl<'a, T> ReadGuardedIterator<'a, <&'a T as IterationHandler>::Iterator, T> +where + &'a T: IterationHandler, +{ + pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: Option) -> Self { + Self { inner: Self::new_inner(read_lock, |db| db.iter(col)) } + } + + pub fn new_from_prefix(read_lock: RwLockReadGuard<'a, Option>, col: Option, prefix: &[u8]) -> Self { + Self { inner: Self::new_inner(read_lock, |db| db.iter_from_prefix(col, prefix)) } + } + + fn new_inner( + rlock: RwLockReadGuard<'a, Option>, + f: impl FnOnce(&'a T) -> <&'a T as IterationHandler>::Iterator, + ) -> OwningHandle>, DerefWrapper::Iterator>>> { + OwningHandle::new_with_fn(UnsafeStableAddress(rlock), move |rlock| { + let rlock = unsafe { rlock.as_ref().expect("initialized as non-null; qed") }; + DerefWrapper(rlock.as_ref().map(f)) + }) + } +} + +impl<'a> IterationHandler for &'a DBAndColumns { + type Iterator = DBIterator<'a>; + + fn iter(&self, col: Option) -> Self::Iterator { + col.map_or_else( + || self.db.iterator(IteratorMode::Start), + |c| { + self.db + .iterator_cf(self.get_cf(c as usize), IteratorMode::Start) + .expect("iterator params are valid; qed") + }, + ) + } + + fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Self::Iterator { + col.map_or_else( + || self.db.prefix_iterator(prefix), + |c| self.db.prefix_iterator_cf(self.get_cf(c as usize), prefix).expect("iterator params are valid; qed"), + ) + } +} diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 75d2eb615..3a0905273 
100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -14,17 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::{cmp, collections::HashMap, error, fs, io, marker::PhantomData, mem, path::Path, result}; +mod iter; + +use std::{cmp, collections::HashMap, convert::identity, error, fs, io, mem, path::Path, result}; -use interleaved_ordered::{interleave_ordered, InterleaveOrdered}; -use parity_rocksdb::{ - BlockBasedOptions, Cache, Column, DBIterator, Direction, IteratorMode, Options, ReadOptions, Writable, WriteBatch, - WriteOptions, DB, -}; use parking_lot::{Mutex, MutexGuard, RwLock}; +use rocksdb::{ + BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Error, Options, ReadOptions, WriteBatch, WriteOptions, DB, +}; +use crate::iter::KeyValuePair; use elastic_array::ElasticArray32; use fs_swap::{swap, swap_nonatomic}; +use interleaved_ordered::interleave_ordered; use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; use log::{debug, warn}; @@ -68,7 +70,7 @@ enum KeyState { #[derive(Clone, Copy, PartialEq, Debug)] pub struct CompactionProfile { /// L0-L1 target file size - /// The mimimum size should be calculated in accordance with the + /// The minimum size should be calculated in accordance with the /// number of levels and the expected size of the database. pub initial_file_size: u64, /// block size @@ -142,7 +144,7 @@ impl CompactionProfile { /// Default profile suitable for SSD storage pub fn ssd() -> CompactionProfile { - CompactionProfile { initial_file_size: 64 * MB as u64, block_size: 8 * MB } + CompactionProfile { initial_file_size: 64 * MB as u64, block_size: 16 * KB } } /// Slow HDD compaction profile @@ -178,40 +180,31 @@ impl DatabaseConfig { /// Returns the total memory budget in bytes. 
pub fn memory_budget(&self) -> MiB { - if self.memory_budget.is_empty() && self.columns.is_none() { - return DB_DEFAULT_MEMORY_BUDGET_MB * MB; + match self.columns { + None => self.memory_budget.get(&None).unwrap_or(&DB_DEFAULT_MEMORY_BUDGET_MB) * MB, + Some(columns) => (0..columns) + .map(|i| self.memory_budget.get(&Some(i)).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB) + .sum(), } - (0..=self.columns.unwrap_or(0)) - .map(|i| self.memory_budget.get(&i.checked_sub(1)).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB) - .sum() } /// Returns the memory budget of the specified column in bytes. - fn memory_budget_per_col(&self, col: Option) -> MiB { - self.memory_budget.get(&col).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB + fn memory_budget_for_col(&self, col: u32) -> MiB { + self.memory_budget.get(&Some(col)).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB } // Get column family configuration with the given block based options. - fn column_config(&self, block_opts: &BlockBasedOptions, col: Option) -> io::Result { - let memory_budget_per_col = self.memory_budget_per_col(col); - let mut opts = Options::new(); - - opts.set_parsed_options("level_compaction_dynamic_level_bytes=true").map_err(other_io_err)?; + fn column_config(&self, block_opts: &BlockBasedOptions, col: u32) -> Options { + let column_mem_budget = self.memory_budget_for_col(col); + let mut opts = Options::default(); + opts.set_level_compaction_dynamic_level_bytes(true); opts.set_block_based_table_factory(block_opts); - - opts.set_parsed_options(&format!( - "block_based_table_factory={{{};{}}}", - "cache_index_and_filter_blocks=true", "pin_l0_filter_and_index_blocks_in_cache=true" - )) - .map_err(other_io_err)?; - - opts.optimize_level_style_compaction(memory_budget_per_col as i32); + opts.optimize_level_style_compaction(column_mem_budget); opts.set_target_file_size_base(self.compaction.initial_file_size); + opts.set_compression_per_level(&[]); - 
opts.set_parsed_options("compression_per_level=").map_err(other_io_err)?; - - Ok(opts) + opts } } @@ -227,35 +220,25 @@ impl Default for DatabaseConfig { } } -/// Database iterator (for flushed data only) -// The compromise of holding only a virtual borrow vs. holding a lock on the -// inner DB (to prevent closing via restoration) may be re-evaluated in the future. -pub struct DatabaseIterator<'a> { - iter: InterleaveOrdered<::std::vec::IntoIter<(Box<[u8]>, Box<[u8]>)>, DBIterator>, - _marker: PhantomData<&'a Database>, +struct DBAndColumns { + db: DB, + column_names: Vec, } -impl<'a> Iterator for DatabaseIterator<'a> { - type Item = (Box<[u8]>, Box<[u8]>); - - fn next(&mut self) -> Option { - self.iter.next() +impl DBAndColumns { + fn get_cf(&self, i: usize) -> &ColumnFamily { + self.db.cf_handle(&self.column_names[i]).expect("the specified column name is correct; qed") } } -struct DBAndColumns { - db: DB, - cfs: Vec, -} - /// Key-Value database. pub struct Database { db: RwLock>, config: DatabaseConfig, + path: String, write_opts: WriteOptions, read_opts: ReadOptions, block_opts: BlockBasedOptions, - path: String, // Dirty values added with `write_buffered`. Cleaned on `flush`. overlay: RwLock, KeyState>>>, // Values currently being flushed. Cleared when `flush` completes. @@ -266,9 +249,9 @@ pub struct Database { } #[inline] -fn check_for_corruption>(path: P, res: result::Result) -> io::Result { +fn check_for_corruption>(path: P, res: result::Result) -> io::Result { if let Err(ref s) = res { - if s.starts_with("Corruption:") { + if is_corrupted(s) { warn!("DB corrupted: {}. 
Repair will be triggered on next restart", s); let _ = fs::File::create(path.as_ref().join(Database::CORRUPTION_FILE_NAME)); } @@ -277,8 +260,52 @@ fn check_for_corruption>(path: P, res: result::Result bool { - s.starts_with("Corruption:") || s.starts_with("Invalid argument: You have to open all column families") +fn is_corrupted(err: &Error) -> bool { + err.as_ref().starts_with("Corruption:") + || err.as_ref().starts_with("Invalid argument: You have to open all column families") +} + +/// Generate the options for RocksDB, based on the given `DatabaseConfig`. +fn generate_options(config: &DatabaseConfig) -> Options { + let mut opts = Options::default(); + let columns = config.columns.unwrap_or(0); + + if columns == 0 { + let budget = config.memory_budget() / 2; + opts.set_db_write_buffer_size(budget); + // from https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#memtable + // Memtable size is controlled by the option `write_buffer_size`. + // If you increase your memtable size, be sure to also increase your L1 size! + // L1 size is controlled by the option `max_bytes_for_level_base`. + opts.set_max_bytes_for_level_base(budget as u64); + } + + opts.set_use_fsync(false); + opts.create_if_missing(true); + opts.set_max_open_files(config.max_open_files); + opts.set_bytes_per_sync(1 * MB as u64); + opts.set_keep_log_file_num(1); + opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); + + opts +} + +/// Generate the block based options for RocksDB, based on the given `DatabaseConfig`. 
+fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { + let mut block_opts = BlockBasedOptions::default(); + block_opts.set_block_size(config.compaction.block_size); + // Set cache size as recommended by + // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size + let cache_size = config.memory_budget() / 3; + block_opts.set_lru_cache(cache_size); + // "index and filter blocks will be stored in block cache, together with all other data blocks." + // See: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks + block_opts.set_cache_index_and_filter_blocks(true); + // Don't evict L0 filter/index blocks from the cache + block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + block_opts.set_bloom_filter(10, true); + + block_opts } impl Database { @@ -291,36 +318,12 @@ impl Database { /// Open database file. Creates if it does not exist. pub fn open(config: &DatabaseConfig, path: &str) -> io::Result { - let mut opts = Options::new(); - - opts.set_use_fsync(false); - opts.create_if_missing(true); - opts.set_max_open_files(config.max_open_files); - opts.set_parsed_options(&format!("keep_log_file_num={}", config.keep_log_file_num)).map_err(other_io_err)?; - opts.set_parsed_options("bytes_per_sync=1048576").map_err(other_io_err)?; - + let opts = generate_options(config); + let block_opts = generate_block_based_options(config); let columns = config.columns.unwrap_or(0); - if columns == 0 { - let budget = config.memory_budget() / 2; - opts.set_db_write_buffer_size(budget); - // from https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#memtable - // Memtable size is controlled by the option `write_buffer_size`. - // If you increase your memtable size, be sure to also increase your L1 size! - // L1 size is controlled by the option `max_bytes_for_level_base`. 
- opts.set_parsed_options(&format!("max_bytes_for_level_base={}", budget)).map_err(other_io_err)?; - } - opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); - - let mut block_opts = BlockBasedOptions::new(); - - { - block_opts.set_block_size(config.compaction.block_size); - // Set cache size as recommended by - // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size - let cache_size = config.memory_budget() / 3; - let cache = Cache::new(cache_size); - block_opts.set_cache(cache); + if config.columns.is_some() && config.memory_budget.contains_key(&None) { + warn!("Memory budget for the default column (None) is ignored if columns.is_some()"); } // attempt database repair if it has been previously marked as corrupted @@ -331,47 +334,36 @@ impl Database { fs::remove_file(db_corrupted)?; } - let mut cf_options = Vec::with_capacity(columns as usize); - let cfnames: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); - let cfnames: Vec<&str> = cfnames.iter().map(|n| n as &str).collect(); - - for i in 0..columns { - cf_options.push(config.column_config(&block_opts, Some(i))?); - } + let column_names: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); - let write_opts = WriteOptions::new(); - let mut read_opts = ReadOptions::new(); + let write_opts = WriteOptions::default(); + let mut read_opts = ReadOptions::default(); read_opts.set_verify_checksums(false); - let mut cfs: Vec = Vec::new(); - let db = match config.columns { - Some(_) => { - match DB::open_cf(&opts, path, &cfnames, &cf_options) { - Ok(db) => { - cfs = cfnames - .iter() - .map(|n| db.cf_handle(n).expect("rocksdb opens a cf_handle for each cfname; qed")) - .collect(); - Ok(db) - } - Err(_) => { - // retry and create CFs - match DB::open_cf(&opts, path, &[], &[]) { - Ok(mut db) => { - cfs = cfnames - .iter() - .enumerate() - .map(|(i, n)| db.create_cf(n, &cf_options[i])) - .collect::<::std::result::Result<_, _>>() + let db = if 
config.columns.is_some() { + let cf_descriptors: Vec<_> = (0..columns) + .map(|i| ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i))) + .collect(); + + match DB::open_cf_descriptors(&opts, path, cf_descriptors) { + Err(_) => { + // retry and create CFs + match DB::open_cf(&opts, path, &[] as &[&str]) { + Ok(mut db) => { + for (i, name) in column_names.iter().enumerate() { + let _ = db + .create_cf(name, &config.column_config(&block_opts, i as u32)) .map_err(other_io_err)?; - Ok(db) } - err => err, + Ok(db) } + err => err, } } + ok => ok, } - None => DB::open(&opts, path), + } else { + DB::open(&opts, path) }; let db = match db { @@ -380,29 +372,29 @@ impl Database { warn!("DB corrupted: {}, attempting repair", s); DB::repair(&opts, path).map_err(other_io_err)?; - if cfnames.is_empty() { - DB::open(&opts, path).map_err(other_io_err)? - } else { - let db = DB::open_cf(&opts, path, &cfnames, &cf_options).map_err(other_io_err)?; - cfs = cfnames - .iter() - .map(|n| db.cf_handle(n).expect("rocksdb opens a cf_handle for each cfname; qed")) + if config.columns.is_some() { + let cf_descriptors: Vec<_> = (0..columns) + .map(|i| { + ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i)) + }) .collect(); - db + + DB::open_cf_descriptors(&opts, path, cf_descriptors).map_err(other_io_err)? + } else { + DB::open(&opts, path).map_err(other_io_err)? 
} } Err(s) => return Err(other_io_err(s)), }; - let num_cols = cfs.len(); Ok(Database { - db: RwLock::new(Some(DBAndColumns { db, cfs })), + db: RwLock::new(Some(DBAndColumns { db, column_names })), config: config.clone(), - write_opts, - overlay: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), - flushing: RwLock::new((0..(num_cols + 1)).map(|_| HashMap::new()).collect()), + overlay: RwLock::new((0..=columns).map(|_| HashMap::new()).collect()), + flushing: RwLock::new((0..=columns).map(|_| HashMap::new()).collect()), flushing_lock: Mutex::new(false), path: path.to_owned(), read_opts, + write_opts, block_opts, }) } @@ -437,8 +429,8 @@ impl Database { /// Commit buffered changes to database. Must be called under `flush_lock` fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<'_, bool>) -> io::Result<()> { match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let batch = WriteBatch::new(); + Some(ref cfs) => { + let mut batch = WriteBatch::default(); mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write()); { for (c, column) in self.flushing.read().iter().enumerate() { @@ -446,14 +438,16 @@ impl Database { match *state { KeyState::Delete => { if c > 0 { - batch.delete_cf(cfs[c - 1], key).map_err(other_io_err)?; + let cf = cfs.get_cf(c - 1); + batch.delete_cf(cf, key).map_err(other_io_err)?; } else { batch.delete(key).map_err(other_io_err)?; } } KeyState::Insert(ref value) => { if c > 0 { - batch.put_cf(cfs[c - 1], key, value).map_err(other_io_err)?; + let cf = cfs.get_cf(c - 1); + batch.put_cf(cf, key, value).map_err(other_io_err)?; } else { batch.put(key, value).map_err(other_io_err)?; } @@ -463,7 +457,7 @@ impl Database { } } - check_for_corruption(&self.path, db.write_opt(batch, &self.write_opts))?; + check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts))?; for column in self.flushing.write().iter_mut() { column.clear(); @@ -493,8 +487,8 @@ impl Database { /// Commit transaction to 
database. pub fn write(&self, tr: DBTransaction) -> io::Result<()> { match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let batch = WriteBatch::new(); + Some(ref cfs) => { + let mut batch = WriteBatch::default(); let ops = tr.ops; for op in ops { // remove any buffered operation for this key @@ -503,16 +497,16 @@ impl Database { match op { DBOp::Insert { col, key, value } => match col { None => batch.put(&key, &value).map_err(other_io_err)?, - Some(c) => batch.put_cf(cfs[c as usize], &key, &value).map_err(other_io_err)?, + Some(c) => batch.put_cf(cfs.get_cf(c as usize), &key, &value).map_err(other_io_err)?, }, DBOp::Delete { col, key } => match col { None => batch.delete(&key).map_err(other_io_err)?, - Some(c) => batch.delete_cf(cfs[c as usize], &key).map_err(other_io_err)?, + Some(c) => batch.delete_cf(cfs.get_cf(c as usize), &key).map_err(other_io_err)?, }, } } - check_for_corruption(&self.path, db.write_opt(batch, &self.write_opts)) + check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) } None => Err(other_io_err("Database is closed")), } @@ -521,7 +515,7 @@ impl Database { /// Get value by key. pub fn get(&self, col: Option, key: &[u8]) -> io::Result> { match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { + Some(ref cfs) => { let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; match overlay.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), @@ -533,9 +527,10 @@ impl Database { Some(&KeyState::Delete) => Ok(None), None => col .map_or_else( - || db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))), + || cfs.db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))), |c| { - db.get_cf_opt(cfs[c as usize], key, &self.read_opts) + cfs.db + .get_cf_opt(cfs.get_cf(c as usize), key, &self.read_opts) .map(|r| r.map(|v| DBValue::from_slice(&v))) }, ) @@ -551,26 +546,18 @@ impl Database { /// Get value by partial key. 
Prefix size should match configured prefix size. Only searches flushed values. // TODO: support prefix seek for unflushed data pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - self.iter_from_prefix(col, prefix).and_then(|mut iter| { - match iter.next() { - // TODO: use prefix_same_as_start read option (not available in C API currently) - Some((k, v)) => { - if k[0..prefix.len()] == prefix[..] { - Some(v) - } else { - None - } - } - _ => None, - } - }) + self.iter_from_prefix(col, prefix).next().map(|(_, v)| v) } /// Get database iterator for flushed data. - pub fn iter(&self, col: Option) -> Option> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; + /// Will hold a lock until the iterator is dropped + /// preventing the database from being closed. + pub fn iter<'a>(&'a self, col: Option) -> impl Iterator + 'a { + let read_lock = self.db.read(); + let optional = if read_lock.is_some() { + let c = Self::to_overlay_column(col); + let overlay_data = { + let overlay = &self.overlay.read()[c]; let mut overlay_data = overlay .iter() .filter_map(|(k, v)| match *v { @@ -581,40 +568,33 @@ impl Database { }) .collect::>(); overlay_data.sort(); + overlay_data + }; - let iter = col.map_or_else( - || db.iterator_opt(IteratorMode::Start, &self.read_opts), - |c| { - db.iterator_cf_opt(cfs[c as usize], IteratorMode::Start, &self.read_opts) - .expect("iterator params are valid; qed") - }, - ); - - Some(DatabaseIterator { iter: interleave_ordered(overlay_data, iter), _marker: PhantomData }) - } - None => None, - } + let guarded = iter::ReadGuardedIterator::new(read_lock, col); + Some(interleave_ordered(overlay_data, guarded)) + } else { + None + }; + optional.into_iter().flat_map(identity) } - - fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Option> { - match *self.db.read() { - Some(DBAndColumns { ref db, ref cfs }) => { - let iter = col.map_or_else( - || 
db.iterator_opt(IteratorMode::From(prefix, Direction::Forward), &self.read_opts), - |c| { - db.iterator_cf_opt( - cfs[c as usize], - IteratorMode::From(prefix, Direction::Forward), - &self.read_opts, - ) - .expect("iterator params are valid; qed") - }, - ); - - Some(DatabaseIterator { iter: interleave_ordered(Vec::new(), iter), _marker: PhantomData }) - } - None => None, - } + /// Get database iterator from prefix for flushed data. + /// Will hold a lock until the iterator is dropped + /// preventing the database from being closed. + fn iter_from_prefix<'a>( + &'a self, + col: Option, + prefix: &'a [u8], + ) -> impl Iterator + 'a { + let read_lock = self.db.read(); + let optional = if read_lock.is_some() { + let guarded = iter::ReadGuardedIterator::new_from_prefix(read_lock, col, prefix); + Some(interleave_ordered(Vec::new(), guarded)) + } else { + None + }; + // workaround for https://github.com/facebook/rocksdb/issues/2343 + optional.into_iter().flat_map(identity).filter(move |(k, _)| k.starts_with(prefix)) } /// Close the database @@ -665,7 +645,7 @@ impl Database { self.db .read() .as_ref() - .and_then(|db| if db.cfs.is_empty() { None } else { Some(db.cfs.len()) }) + .and_then(|db| if db.column_names.is_empty() { None } else { Some(db.column_names.len()) }) .map(|n| n as u32) .unwrap_or(0) } @@ -673,9 +653,8 @@ impl Database { /// Drop a column family. pub fn drop_column(&self) -> io::Result<()> { match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut cfs }) => { - if let Some(_col) = cfs.pop() { - let name = format!("col{}", cfs.len()); + Some(DBAndColumns { ref mut db, ref mut column_names }) => { + if let Some(name) = column_names.pop() { db.drop_cf(&name).map_err(other_io_err)?; } Ok(()) @@ -687,11 +666,12 @@ impl Database { /// Add a column family. 
pub fn add_column(&self) -> io::Result<()> { match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut cfs }) => { - let col = cfs.len(); + Some(DBAndColumns { ref mut db, ref mut column_names }) => { + let col = column_names.len() as u32; let name = format!("col{}", col); - let col_config = self.config.column_config(&self.block_opts, Some(col as u32))?; - cfs.push(db.create_cf(&name, &col_config).map_err(other_io_err)?); + let col_config = self.config.column_config(&self.block_opts, col as u32); + let _ = db.create_cf(&name, &col_config).map_err(other_io_err)?; + column_names.push(name); Ok(()) } None => Ok(()), @@ -722,18 +702,18 @@ impl KeyValueDB for Database { Database::flush(self) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: Option) -> Box + 'a> { let unboxed = Database::iter(self, col); - Box::new(unboxed.into_iter().flat_map(|inner| inner)) + Box::new(unboxed.into_iter()) } fn iter_from_prefix<'a>( &'a self, col: Option, prefix: &'a [u8], - ) -> Box, Box<[u8]>)> + 'a> { + ) -> Box + 'a> { let unboxed = Database::iter_from_prefix(self, col, prefix); - Box::new(unboxed.into_iter().flat_map(|inner| inner)) + Box::new(unboxed.into_iter()) } fn restore(&self, new_db: &str) -> io::Result<()> { @@ -752,6 +732,7 @@ impl Drop for Database { mod tests { use super::*; use ethereum_types::H256; + use std::io::Read; use std::str::FromStr; use tempdir::TempDir; @@ -760,22 +741,32 @@ mod tests { let db = Database::open(config, tempdir.path().to_str().unwrap()).unwrap(); let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + let key3 = H256::from_str("04c00000000b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + let key4 = 
H256::from_str("04c01111110b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); + let key5 = H256::from_str("04c02222220b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let mut batch = db.transaction(); batch.put(None, key1.as_bytes(), b"cat"); batch.put(None, key2.as_bytes(), b"dog"); + batch.put(None, key3.as_bytes(), b"caterpillar"); + batch.put(None, key4.as_bytes(), b"beef"); + batch.put(None, key5.as_bytes(), b"fish"); db.write(batch).unwrap(); assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"cat"); - let contents: Vec<_> = db.iter(None).into_iter().flat_map(|inner| inner).collect(); - assert_eq!(contents.len(), 2); + let contents: Vec<_> = db.iter(None).into_iter().collect(); + assert_eq!(contents.len(), 5); assert_eq!(&*contents[0].0, key1.as_bytes()); assert_eq!(&*contents[0].1, b"cat"); assert_eq!(&*contents[1].0, key2.as_bytes()); assert_eq!(&*contents[1].1, b"dog"); + let mut prefix_iter = db.iter_from_prefix(None, &[0x04, 0xc0]); + assert_eq!(*prefix_iter.next().unwrap().1, b"caterpillar"[..]); + assert_eq!(*prefix_iter.next().unwrap().1, b"beef"[..]); + assert_eq!(*prefix_iter.next().unwrap().1, b"fish"[..]); + let mut batch = db.transaction(); batch.delete(None, key1.as_bytes()); db.write(batch).unwrap(); @@ -861,11 +852,11 @@ mod tests { let config = DatabaseConfig::default(); let config_5 = DatabaseConfig::with_columns(Some(5)); - let tempdir = TempDir::new("").unwrap(); + let tempdir = TempDir::new("drop_columns").unwrap(); // open 5, remove all. 
{ - let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).unwrap(); + let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).expect("open with 5 columns"); assert_eq!(db.num_columns(), 5); for i in (0..5).rev() { @@ -881,6 +872,55 @@ mod tests { } } + #[test] + fn test_iter_by_prefix() { + let tempdir = TempDir::new("").unwrap(); + let config = DatabaseConfig::default(); + let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + + let key1 = b"0"; + let key2 = b"ab"; + let key3 = b"abc"; + let key4 = b"abcd"; + + let mut batch = db.transaction(); + batch.put(None, key1, key1); + batch.put(None, key2, key2); + batch.put(None, key3, key3); + batch.put(None, key4, key4); + db.write(batch).unwrap(); + + // empty prefix + let contents: Vec<_> = db.iter_from_prefix(None, b"").into_iter().collect(); + assert_eq!(contents.len(), 4); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[2].0, key3); + assert_eq!(&*contents[3].0, key4); + + // prefix a + let contents: Vec<_> = db.iter_from_prefix(None, b"a").into_iter().collect(); + assert_eq!(contents.len(), 3); + assert_eq!(&*contents[0].0, key2); + assert_eq!(&*contents[1].0, key3); + assert_eq!(&*contents[2].0, key4); + + // prefix abc + let contents: Vec<_> = db.iter_from_prefix(None, b"abc").into_iter().collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key3); + assert_eq!(&*contents[1].0, key4); + + // prefix abcde + let contents: Vec<_> = db.iter_from_prefix(None, b"abcde").into_iter().collect(); + assert_eq!(contents.len(), 0); + + // prefix 0 + let contents: Vec<_> = db.iter_from_prefix(None, b"0").into_iter().collect(); + assert_eq!(contents.len(), 1); + assert_eq!(&*contents[0].0, key1); + } + #[test] fn write_clears_buffered_ops() { let tempdir = TempDir::new("").unwrap(); @@ -897,4 +937,94 @@ mod tests { assert_eq!(db.get(None, b"foo").unwrap().unwrap().as_ref(), b"baz"); } + + #[test] + fn 
default_memory_budget() { + let c = DatabaseConfig::default(); + assert_eq!(c.columns, None); + assert_eq!(c.memory_budget(), DB_DEFAULT_MEMORY_BUDGET_MB * MB, "total memory budget is default"); + assert_eq!( + c.memory_budget_for_col(0), + DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, + "total memory budget for column 0 is the default" + ); + assert_eq!( + c.memory_budget_for_col(999), + DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, + "total memory budget for any column is the default" + ); + } + + #[test] + fn memory_budget() { + let mut c = DatabaseConfig::with_columns(Some(3)); + c.memory_budget = [(0, 10), (1, 15), (2, 20)].iter().cloned().map(|(c, b)| (Some(c), b)).collect(); + assert_eq!(c.memory_budget(), 45 * MB, "total budget is the sum of the column budget"); + } + + #[test] + fn rocksdb_settings() { + const NUM_COLS: usize = 2; + let mut cfg = DatabaseConfig::with_columns(Some(NUM_COLS as u32)); + cfg.max_open_files = 123; // is capped by the OS fd limit (typically 1024) + cfg.compaction.block_size = 323232; + cfg.compaction.initial_file_size = 102030; + cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().map(|(c, b)| (Some(c), b)).collect(); + + let db_path = TempDir::new("config_test").expect("the OS can create tmp dirs"); + let _db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); + let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) + .expect("rocksdb creates a LOG file"); + let mut settings = String::new(); + rocksdb_log.read_to_string(&mut settings).unwrap(); + // Check column count + assert!(settings.contains("Options for column family [default]"), "no default col"); + assert!(settings.contains("Options for column family [col0]"), "no col0"); + assert!(settings.contains("Options for column family [col1]"), "no col1"); + + // Check max_open_files + assert!(settings.contains("max_open_files: 123")); + + // Check block size + assert!(settings.contains(" block_size: 323232")); + + 
// LRU cache (default column) + assert!(settings.contains("block_cache_options:\n capacity : 8388608")); + // LRU cache for non-default columns is ⅓ of memory budget (including default column) + let lru_size = (330 * MB) / 3; + let needle = format!("block_cache_options:\n capacity : {}", lru_size); + let lru = settings.match_indices(&needle).collect::>().len(); + assert_eq!(lru, NUM_COLS); + + // Index/filters share cache + let include_indexes = settings.matches("cache_index_and_filter_blocks: 1").collect::>().len(); + assert_eq!(include_indexes, NUM_COLS); + // Pin index/filters on L0 + let pins = settings.matches("pin_l0_filter_and_index_blocks_in_cache: 1").collect::>().len(); + assert_eq!(pins, NUM_COLS); + + // Check target file size, aka initial file size + let l0_sizes = settings.matches("target_file_size_base: 102030").collect::>().len(); + assert_eq!(l0_sizes, NUM_COLS); + // The default column uses the default of 64Mb regardless of the setting. + assert!(settings.contains("target_file_size_base: 67108864")); + + // Check compression settings + let snappy_compression = settings.matches("Options.compression: Snappy").collect::>().len(); + // All columns use Snappy + assert_eq!(snappy_compression, NUM_COLS + 1); + // …even for L7 + let snappy_bottommost = settings.matches("Options.bottommost_compression: Disabled").collect::>().len(); + assert_eq!(snappy_bottommost, NUM_COLS + 1); + + // 7 levels + let levels = settings.matches("Options.num_levels: 7").collect::>().len(); + assert_eq!(levels, NUM_COLS + 1); + + // Don't fsync every store + assert!(settings.contains("Options.use_fsync: 0")); + + // We're using the old format + assert!(settings.contains("format_version: 2")); + } } From 2c26418244653e4a0134f1dfaf5a6079fa5589cf Mon Sep 17 00:00:00 2001 From: David Date: Thu, 28 Nov 2019 15:56:35 +0100 Subject: [PATCH 038/359] [kvdb-rocksdb] Release 0.2 (#273) --- kvdb-rocksdb/CHANGELOG.md | 6 ++++++ kvdb-rocksdb/Cargo.toml | 2 +- 2 files changed, 7 
insertions(+), 1 deletion(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index cc514b497..7e6565d65 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,12 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.2.0] - 2019-11-28 +- Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb) (see [PR #257](https://github.com/paritytech/parity-common/pull/257) for details) +- Revamped configuration handling, allowing per-column memory budgeting (see [PR #256](https://github.com/paritytech/parity-common/pull/256) for details) +### Dependencies +- rust-rocksdb v0.13 + ## [0.1.6] - 2019-10-24 - Updated to 2018 edition idioms (https://github.com/paritytech/parity-common/pull/237) ### Dependencies diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index bbf2e42cb..c01b51c6a 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.1.6" +version = "0.2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by rocksDB" From a09549015d82d4db556cb7e3b07004e8a515ec94 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 5 Dec 2019 10:52:58 +0100 Subject: [PATCH 039/359] [kvdb-rocksdb] Use "pinned" gets to avoid allocations (#274) * Use "pinned" gets to avoid allocations Needs benchmarks to prove it actually matters. 
* Fix test * Rename `get_colf` to just `cf` Add todos to measure `#[inline]` * Formatting * Using #[inline] does not help read perf * Add Changelog * Update CHANGELOG.md --- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/src/iter.rs | 8 ++------ kvdb-rocksdb/src/lib.rs | 28 ++++++++++++++++------------ 3 files changed, 21 insertions(+), 18 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 7e6565d65..d673404cb 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- use `get_pinned` API to save one allocation for each call to `get()` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) +- rename `drop_column` to `remove_last_column` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) +- rename `get_cf` to `cf` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) ## [0.2.0] - 2019-11-28 - Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb) (see [PR #257](https://github.com/paritytech/parity-common/pull/257) for details) diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 52934e1a8..079563aad 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -111,18 +111,14 @@ impl<'a> IterationHandler for &'a DBAndColumns { fn iter(&self, col: Option) -> Self::Iterator { col.map_or_else( || self.db.iterator(IteratorMode::Start), - |c| { - self.db - .iterator_cf(self.get_cf(c as usize), IteratorMode::Start) - .expect("iterator params are valid; qed") - }, + |c| self.db.iterator_cf(self.cf(c as usize), IteratorMode::Start).expect("iterator params are valid; qed"), ) } fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Self::Iterator { col.map_or_else( || 
self.db.prefix_iterator(prefix), - |c| self.db.prefix_iterator_cf(self.get_cf(c as usize), prefix).expect("iterator params are valid; qed"), + |c| self.db.prefix_iterator_cf(self.cf(c as usize), prefix).expect("iterator params are valid; qed"), ) } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 3a0905273..df7b9f833 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -226,7 +226,7 @@ struct DBAndColumns { } impl DBAndColumns { - fn get_cf(&self, i: usize) -> &ColumnFamily { + fn cf(&self, i: usize) -> &ColumnFamily { self.db.cf_handle(&self.column_names[i]).expect("the specified column name is correct; qed") } } @@ -438,7 +438,7 @@ impl Database { match *state { KeyState::Delete => { if c > 0 { - let cf = cfs.get_cf(c - 1); + let cf = cfs.cf(c - 1); batch.delete_cf(cf, key).map_err(other_io_err)?; } else { batch.delete(key).map_err(other_io_err)?; @@ -446,7 +446,7 @@ impl Database { } KeyState::Insert(ref value) => { if c > 0 { - let cf = cfs.get_cf(c - 1); + let cf = cfs.cf(c - 1); batch.put_cf(cf, key, value).map_err(other_io_err)?; } else { batch.put(key, value).map_err(other_io_err)?; @@ -497,11 +497,11 @@ impl Database { match op { DBOp::Insert { col, key, value } => match col { None => batch.put(&key, &value).map_err(other_io_err)?, - Some(c) => batch.put_cf(cfs.get_cf(c as usize), &key, &value).map_err(other_io_err)?, + Some(c) => batch.put_cf(cfs.cf(c as usize), &key, &value).map_err(other_io_err)?, }, DBOp::Delete { col, key } => match col { None => batch.delete(&key).map_err(other_io_err)?, - Some(c) => batch.delete_cf(cfs.get_cf(c as usize), &key).map_err(other_io_err)?, + Some(c) => batch.delete_cf(cfs.cf(c as usize), &key).map_err(other_io_err)?, }, } } @@ -527,10 +527,14 @@ impl Database { Some(&KeyState::Delete) => Ok(None), None => col .map_or_else( - || cfs.db.get_opt(key, &self.read_opts).map(|r| r.map(|v| DBValue::from_slice(&v))), + || { + cfs.db + .get_pinned_opt(key, &self.read_opts) + .map(|r| 
r.map(|v| DBValue::from_slice(&v))) + }, |c| { cfs.db - .get_cf_opt(cfs.get_cf(c as usize), key, &self.read_opts) + .get_pinned_cf_opt(cfs.cf(c as usize), key, &self.read_opts) .map(|r| r.map(|v| DBValue::from_slice(&v))) }, ) @@ -650,8 +654,8 @@ impl Database { .unwrap_or(0) } - /// Drop a column family. - pub fn drop_column(&self) -> io::Result<()> { + /// Remove the last column family in the database. The deletion is definitive. + pub fn remove_last_column(&self) -> io::Result<()> { match *self.db.write() { Some(DBAndColumns { ref mut db, ref mut column_names }) => { if let Some(name) = column_names.pop() { @@ -663,7 +667,7 @@ impl Database { } } - /// Add a column family. + /// Add a new column family to the DB. pub fn add_column(&self) -> io::Result<()> { match *self.db.write() { Some(DBAndColumns { ref mut db, ref mut column_names }) => { @@ -848,7 +852,7 @@ mod tests { } #[test] - fn drop_columns() { + fn remove_columns() { let config = DatabaseConfig::default(); let config_5 = DatabaseConfig::with_columns(Some(5)); @@ -860,7 +864,7 @@ mod tests { assert_eq!(db.num_columns(), 5); for i in (0..5).rev() { - db.drop_column().unwrap(); + db.remove_last_column().unwrap(); assert_eq!(db.num_columns(), i); } } From b5dc4108f0711c1227ba6e37ecd23b12538c5189 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 9 Dec 2019 13:09:05 +0300 Subject: [PATCH 040/359] Compile triehash for no_std (#280) * compile triehash for no_std * up minor version * removed obsolete feature * typo * upd CHANGELOG * std by default * no_std prelude * style fixes --- triehash/CHANGELOG.md | 2 +- triehash/Cargo.toml | 13 ++++++++++--- triehash/src/lib.rs | 22 +++++++++++++++++++--- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md index 67629508f..c8b6fd2be 100644 --- a/triehash/CHANGELOG.md +++ b/triehash/CHANGELOG.md @@ -5,7 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] - +- Added no-std support (https://github.com/paritytech/parity-common/pull/280) ## [0.8.1] - 2019-10-24 - Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/214) ### Dependencies diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 701aae36b..a941edaa0 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "triehash" -version = "0.8.1" +version = "0.8.2" authors = ["Parity Technologies "] description = "In-memory patricia trie operations" repository = "https://github.com/paritytech/parity-common" @@ -8,8 +8,8 @@ license = "GPL-3.0" edition = "2018" [dependencies] -hash-db = "0.15.2" -rlp = { version = "0.4", path = "../rlp" } +hash-db = { version = "0.15.2", default-features = false } +rlp = { version = "0.4", path = "../rlp", default-features = false } [dev-dependencies] criterion = "0.3.0" @@ -19,6 +19,13 @@ tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" hex-literal = "0.2.1" +[features] +default = ["std"] +std = [ + "hash-db/std", + "rlp/std", +] + [[bench]] name = "triehash" path = "benches/triehash.rs" diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs index 41b2a0d17..964e7e14f 100644 --- a/triehash/src/lib.rs +++ b/triehash/src/lib.rs @@ -18,9 +18,25 @@ //! //! This module should be used to generate trie root hash. 
-use std::cmp; -use std::collections::BTreeMap; -use std::iter::once; +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +extern crate alloc; + +#[cfg(feature = "std")] +mod rstd { + pub use std::collections::BTreeMap; +} + +#[cfg(not(feature = "std"))] +mod rstd { + pub use alloc::collections::BTreeMap; + pub use alloc::vec::Vec; +} + +use core::cmp; +use core::iter::once; +use rstd::*; use hash_db::Hasher; use rlp::RlpStream; From a6058e9eea2baeec4f43309d6582330845c489ad Mon Sep 17 00:00:00 2001 From: David Date: Tue, 10 Dec 2019 19:54:49 +0100 Subject: [PATCH 041/359] [kvdb-rocksdb] Add benchmark for point lookups (#275) * [kvdb-rocksdb] Add benchmark for point lookups * Document variability and how it influences allocations/iter Add iter benchmark * Add benchmarks for get_by_prefix and getting a single item off an iterator * Bump alloc_counter * More docs and notes * review grumble * review grumbles --- kvdb-rocksdb/Cargo.toml | 9 +- kvdb-rocksdb/benches/.gitignore | 1 + kvdb-rocksdb/benches/bench_read_perf.rs | 212 ++++++++++++++++++++++++ 3 files changed, 221 insertions(+), 1 deletion(-) create mode 100644 kvdb-rocksdb/benches/.gitignore create mode 100644 kvdb-rocksdb/benches/bench_read_perf.rs diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index c01b51c6a..2049da4ba 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -7,6 +7,10 @@ description = "kvdb implementation backed by rocksDB" license = "GPL-3.0" edition = "2018" +[[bench]] +name = "bench_read_perf" +harness = false + [dependencies] elastic-array = "0.10.2" fs-swap = "0.2.4" @@ -20,5 +24,8 @@ rocksdb = { version = "0.13", features = ["snappy"], default-features = false } owning_ref = "0.4.0" [dev-dependencies] -tempdir = "0.3.7" +alloc_counter = "0.0.4" +criterion = "0.3" ethereum-types = { version = "0.8.0", path = "../ethereum-types" } +rand = "0.7.2" +tempdir = "0.3.7" diff --git a/kvdb-rocksdb/benches/.gitignore 
b/kvdb-rocksdb/benches/.gitignore new file mode 100644 index 000000000..85954e328 --- /dev/null +++ b/kvdb-rocksdb/benches/.gitignore @@ -0,0 +1 @@ +_rocksdb_bench_get diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs new file mode 100644 index 000000000..350fdf2eb --- /dev/null +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -0,0 +1,212 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Benchmark RocksDB read performance. +//! The benchmark setup consists in writing `NEEDLES * NEEDLES_TO_HAYSTACK_RATIO` 32-bytes random +//! keys with random values 150 +/- 30 bytes long. With 10 000 keys and a ratio of 100 we get one +//! million keys; ideally the db should be deleted for each benchmark run but in practice it has +//! little impact on the performance numbers for these small database sizes. +//! Allocations (on the Rust side) are counted and printed. +//! +//! Note that this benchmark is not a good way to measure the performance of the database itself; +//! its purpose is to be a tool to gauge the performance of the glue code, or work as a starting point +//! for a more elaborate benchmark of a specific workload. 
+ +const NEEDLES: usize = 10_000; +const NEEDLES_TO_HAYSTACK_RATIO: usize = 100; + +use std::io; +use std::time::{Duration, Instant}; + +use alloc_counter::{count_alloc, AllocCounterSystem}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use ethereum_types::H256; +use rand::{distributions::Uniform, seq::SliceRandom, Rng}; + +use kvdb_rocksdb::{Database, DatabaseConfig}; + +#[global_allocator] +static A: AllocCounterSystem = AllocCounterSystem; + +criterion_group!(benches, get, iter); +criterion_main!(benches); + +/// Opens (or creates) a RocksDB database in the `benches/` folder of the crate with one column +/// family and default options. Needs manual cleanup. +fn open_db() -> Database { + let tempdir_str = "./benches/_rocksdb_bench_get"; + let cfg = DatabaseConfig::with_columns(Some(1)); + let db = Database::open(&cfg, tempdir_str).expect("rocksdb works"); + db +} + +/// Generate `n` random bytes +/- 20%. +/// The variability in the payload size lets us simulate payload allocation patterns: `DBValue` is +/// an `ElasticArray128` so sometimes we save on allocations. +fn n_random_bytes(n: usize) -> Vec { + let mut rng = rand::thread_rng(); + let variability: i64 = rng.gen_range(0, (n / 5) as i64); + let plus_or_minus: i64 = if variability % 2 == 0 { 1 } else { -1 }; + let range = Uniform::from(0..u8::max_value()); + rng.sample_iter(&range).take((n as i64 + plus_or_minus * variability) as usize).collect() +} + +/// Writes `NEEDLES * NEEDLES_TO_HAYSTACK_RATIO` keys to the DB. Keys are random, 32 bytes long and +/// values are random, 120-180 bytes long. Every `NEEDLES_TO_HAYSTACK_RATIO` keys are kept and +/// returned in a `Vec` for and used to benchmark point lookup performance. Keys are sorted +/// lexicographically in the DB, and the benchmark keys are random bytes making the needles are +/// effectively random points in the key set. 
+fn populate(db: &Database) -> io::Result> { + let mut needles = Vec::with_capacity(NEEDLES); + let mut batch = db.transaction(); + for i in 0..NEEDLES * NEEDLES_TO_HAYSTACK_RATIO { + let key = H256::random(); + if i % NEEDLES_TO_HAYSTACK_RATIO == 0 { + needles.push(key.clone()); + if i % 100_000 == 0 && i > 0 { + println!("[populate] {} keys", i); + } + } + // In ethereum keys are mostly 32 bytes and payloads ~140bytes. + batch.put(Some(0), &key.as_bytes(), &n_random_bytes(140)); + } + db.write(batch)?; + // Clear the overlay + db.flush()?; + Ok(needles) +} + +fn get(c: &mut Criterion) { + let db = open_db(); + let needles = populate(&db).expect("rocksdb works"); + + let mut total_iterations = 0; + let mut total_allocs = 0; + + c.bench_function("get key", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + // This has no measurable impact on performance (~30ns) + let needle = needles.choose(&mut rand::thread_rng()).expect("needles is not empty"); + black_box(db.get(Some(0), needle.as_bytes()).unwrap()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[get key] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } + + total_iterations = 0; + total_allocs = 0; + c.bench_function("get key by prefix", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + // This has no measurable impact on performance (~30ns) + let needle = needles.choose(&mut 
rand::thread_rng()).expect("needles is not empty"); + black_box(db.get_by_prefix(Some(0), &needle.as_bytes()[..8]).unwrap()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[get key by prefix] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } +} + +fn iter(c: &mut Criterion) { + let db = open_db(); + let mut total_iterations = 0; + let mut total_allocs = 0; + + c.bench_function("iterate over 1k keys", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + black_box(db.iter(Some(0)).take(1000).collect::>()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[iterate over 1k keys] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } + + total_allocs = 0; + total_iterations = 0; + c.bench_function("single key from iterator", |b| { + b.iter_custom(|iterations| { + total_iterations += iterations; + let mut elapsed = Duration::new(0, 0); + // NOTE: counts allocations on the Rust side only + let (alloc_stats, _) = count_alloc(|| { + let start = Instant::now(); + for _ in 0..iterations { + black_box(db.iter(Some(0)).next().unwrap()); + } + elapsed = start.elapsed(); + }); + total_allocs += alloc_stats.0; + elapsed + }); + }); + if total_iterations > 0 { + println!( + "[single key from iterator] total: iterations={}, allocations={}; allocations per iter={:.2}\n", + total_iterations, + total_allocs, + total_allocs as f64 / total_iterations as f64 + ); + } +} From 
a9d9335e2e58ad74ea773c68f2c7f7aa739c83a9 Mon Sep 17 00:00:00 2001 From: Robert Vojta Date: Thu, 12 Dec 2019 13:31:07 +0100 Subject: [PATCH 042/359] [kvdb*] Make column type u32 instead of Option (#278) * [kvdb] Require column index Signed-off-by: Robert Vojta * [kvdb-rocksdb] Require column index Signed-off-by: Robert Vojta * [kvdb-web] Require column index Signed-off-by: Robert Vojta * [kvdb-memorydb] Sync iter_from_prefix behaviour skip_while replaced with the filter to synchronize the behaviour with the kvdb-rocksdb. Signed-off-by: Robert Vojta * [kvdb-memory] Require column index Signed-off-by: Robert Vojta * [kvdb-memory] Add basic tests Signed-off-by: Robert Vojta * [kvdb-rocksdb] Remove unsafe code Signed-off-by: Robert Vojta * [kvdb-rocksdb] Remove default column options No longer required - it was used for setting up the default column. Signed-off-by: Robert Vojta * [kvdb-rocksdb] Add note about migration Signed-off-by: Robert Vojta * [kvdb-rocksdb] Update changelog Signed-off-by: Robert Vojta * [kvdb-rocksdb] Replace NonZeroU32 with u32 Signed-off-by: Robert Vojta --- kvdb-memorydb/CHANGELOG.md | 6 + kvdb-memorydb/src/lib.rs | 119 +++++++++++- kvdb-rocksdb/CHANGELOG.md | 14 +- kvdb-rocksdb/src/iter.rs | 24 +-- kvdb-rocksdb/src/lib.rs | 349 +++++++++++++++-------------------- kvdb-web/CHANGELOG.md | 4 + kvdb-web/src/indexed_db.rs | 16 +- kvdb-web/src/lib.rs | 20 +- kvdb-web/tests/indexed_db.rs | 8 +- kvdb/CHANGELOG.md | 4 + kvdb/src/lib.rs | 26 +-- 11 files changed, 325 insertions(+), 265 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 927c9dc9c..8e4f2a3b5 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,3 +5,9 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Fixed +- `iter_from_prefix` behaviour synced with the `kvdb-rocksdb` +### Changed +- Default column support removed from the API + - Column argument type changed from `Option` to `u32` + - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 8491129a0..666c10cec 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -25,24 +25,23 @@ use std::{ /// This is generally intended for tests and is not particularly optimized. #[derive(Default)] pub struct InMemory { - columns: RwLock, BTreeMap, DBValue>>>, + columns: RwLock, DBValue>>>, } /// Create an in-memory database with the given number of columns. /// Columns will be indexable by 0..`num_cols` pub fn create(num_cols: u32) -> InMemory { let mut cols = HashMap::new(); - cols.insert(None, BTreeMap::new()); for idx in 0..num_cols { - cols.insert(Some(idx), BTreeMap::new()); + cols.insert(idx, BTreeMap::new()); } InMemory { columns: RwLock::new(cols) } } impl KeyValueDB for InMemory { - fn get(&self, col: Option, key: &[u8]) -> io::Result> { + fn get(&self, col: u32, key: &[u8]) -> io::Result> { let columns = self.columns.read(); match columns.get(&col) { None => Err(io::Error::new(io::ErrorKind::Other, format!("No such column family: {:?}", col))), @@ -50,7 +49,7 @@ impl KeyValueDB for InMemory { } } - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { let columns = self.columns.read(); match columns.get(&col) { None => None, @@ -83,7 +82,7 @@ impl KeyValueDB for InMemory { Ok(()) } - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( // TODO: worth optimizing at all? 
@@ -95,14 +94,14 @@ impl KeyValueDB for InMemory { fn iter_from_prefix<'a>( &'a self, - col: Option, + col: u32, prefix: &'a [u8], ) -> Box, Box<[u8]>)> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( map.clone() .into_iter() - .skip_while(move |&(ref k, _)| !k.starts_with(prefix)) + .filter(move |&(ref k, _)| k.starts_with(prefix)) .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())), ), None => Box::new(None.into_iter()), @@ -113,3 +112,107 @@ impl KeyValueDB for InMemory { Err(io::Error::new(io::ErrorKind::Other, "Attempted to restore in-memory database")) } } + +#[cfg(test)] +mod tests { + use super::{create, KeyValueDB}; + + #[test] + fn get_fails_with_non_existing_column() { + let db = create(1); + assert!(db.get(1, &[]).is_err()); + } + + #[test] + fn put_and_get() { + let db = create(1); + + let key1 = b"key1"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write_buffered(transaction); + assert_eq!(&*db.get(0, key1).unwrap().unwrap(), b"horse"); + } + + #[test] + fn delete_and_get() { + let db = create(1); + + let key1 = b"key1"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write_buffered(transaction); + assert_eq!(&*db.get(0, key1).unwrap().unwrap(), b"horse"); + + let mut transaction = db.transaction(); + transaction.delete(0, key1); + db.write_buffered(transaction); + assert!(db.get(0, key1).unwrap().is_none()); + } + + #[test] + fn iter() { + let db = create(1); + + let key1 = b"key1"; + let key2 = b"key2"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, key1); + transaction.put(0, key2, key2); + db.write_buffered(transaction); + + let contents: Vec<_> = db.iter(0).into_iter().collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[0].1, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[1].1, key2); + } + + #[test] + fn iter_from_prefix() { + let 
db = create(1); + + let key1 = b"0"; + let key2 = b"a"; + let key3 = b"ab"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, key1); + transaction.put(0, key2, key2); + transaction.put(0, key3, key3); + db.write_buffered(transaction); + + let contents: Vec<_> = db.iter_from_prefix(0, b"").into_iter().collect(); + assert_eq!(contents.len(), 3); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[0].1, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[1].1, key2); + assert_eq!(&*contents[2].0, key3); + assert_eq!(&*contents[2].1, key3); + + let contents: Vec<_> = db.iter_from_prefix(0, b"0").into_iter().collect(); + assert_eq!(contents.len(), 1); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[0].1, key1); + + let contents: Vec<_> = db.iter_from_prefix(0, b"a").into_iter().collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key2); + assert_eq!(&*contents[0].1, key2); + assert_eq!(&*contents[1].0, key3); + assert_eq!(&*contents[1].1, key3); + + let contents: Vec<_> = db.iter_from_prefix(0, b"ab").into_iter().collect(); + assert_eq!(contents.len(), 1); + assert_eq!(&*contents[0].0, key3); + assert_eq!(&*contents[0].1, key3); + + let contents: Vec<_> = db.iter_from_prefix(0, b"abc").into_iter().collect(); + assert_eq!(contents.len(), 0); + } +} diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index d673404cb..632d6ff24 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,9 +5,17 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- use `get_pinned` API to save one allocation for each call to `get()` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) -- rename `drop_column` to `remove_last_column` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) -- rename `get_cf` to `cf` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) +- Use `get_pinned` API to save one allocation for each call to `get()` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) +- Rename `drop_column` to `remove_last_column` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) +- Rename `get_cf` to `cf` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) +- Default column support removed from the API (See [PR #278](https://github.com/paritytech/parity-common/pull/278) for details) + - Column argument type changed from `Option` to `u32` + - Migration + - Column index `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. + - Database must be opened with at least one column and existing DBs has to be opened with a number of columns increased by 1 to avoid having to migrate the data, e.g. before: `Some(9)`, after: `10`. 
+ - `DatabaseConfig::default()` defaults to 1 column + - `Database::with_columns` still accepts `u32`, but panics if `0` is provided + - `Database::open` panics if configuration with 0 columns is provided ## [0.2.0] - 2019-11-28 - Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb) (see [PR #257](https://github.com/paritytech/parity-common/pull/257) for details) diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 079563aad..a8239bef8 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -39,6 +39,7 @@ struct UnsafeStableAddress<'a, T>(RwLockReadGuard<'a, T>); impl<'a, T> Deref for UnsafeStableAddress<'a, T> { type Target = T; + fn deref(&self) -> &Self::Target { self.0.deref() } @@ -51,6 +52,7 @@ struct DerefWrapper(T); impl Deref for DerefWrapper { type Target = T; + fn deref(&self) -> &Self::Target { &self.0 } @@ -76,21 +78,21 @@ pub trait IterationHandler { /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number /// is passed. - fn iter(&self, col: Option) -> Self::Iterator; + fn iter(&self, col: u32) -> Self::Iterator; /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number /// is passed. The iterator starts from the first key having the provided `prefix`. 
- fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Self::Iterator; + fn iter_from_prefix(&self, col: u32, prefix: &[u8]) -> Self::Iterator; } impl<'a, T> ReadGuardedIterator<'a, <&'a T as IterationHandler>::Iterator, T> where &'a T: IterationHandler, { - pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: Option) -> Self { + pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32) -> Self { Self { inner: Self::new_inner(read_lock, |db| db.iter(col)) } } - pub fn new_from_prefix(read_lock: RwLockReadGuard<'a, Option>, col: Option, prefix: &[u8]) -> Self { + pub fn new_from_prefix(read_lock: RwLockReadGuard<'a, Option>, col: u32, prefix: &[u8]) -> Self { Self { inner: Self::new_inner(read_lock, |db| db.iter_from_prefix(col, prefix)) } } @@ -108,17 +110,11 @@ where impl<'a> IterationHandler for &'a DBAndColumns { type Iterator = DBIterator<'a>; - fn iter(&self, col: Option) -> Self::Iterator { - col.map_or_else( - || self.db.iterator(IteratorMode::Start), - |c| self.db.iterator_cf(self.cf(c as usize), IteratorMode::Start).expect("iterator params are valid; qed"), - ) + fn iter(&self, col: u32) -> Self::Iterator { + self.db.iterator_cf(self.cf(col as usize), IteratorMode::Start).expect("iterator params are valid; qed") } - fn iter_from_prefix(&self, col: Option, prefix: &[u8]) -> Self::Iterator { - col.map_or_else( - || self.db.prefix_iterator(prefix), - |c| self.db.prefix_iterator_cf(self.cf(c as usize), prefix).expect("iterator params are valid; qed"), - ) + fn iter_from_prefix(&self, col: u32, prefix: &[u8]) -> Self::Iterator { + self.db.prefix_iterator_cf(self.cf(col as usize), prefix).expect("iterator params are valid; qed") } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index df7b9f833..760b9177e 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -49,8 +49,8 @@ where // Used for memory budget. 
type MiB = usize; -const KB: usize = 1024; -const MB: usize = 1024 * KB; +const KB: usize = 1_024; +const MB: usize = 1_024 * KB; /// The default column memory budget in MiB. pub const DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB: MiB = 128; @@ -162,11 +162,15 @@ pub struct DatabaseConfig { /// write buffer size for each column including the default one. /// If the memory budget of a column is not specified, /// `DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB` is used for that column. - pub memory_budget: HashMap, MiB>, + pub memory_budget: HashMap, /// Compaction profile. pub compaction: CompactionProfile, /// Set number of columns. - pub columns: Option, + /// + /// # Safety + /// + /// The number of columns must not be zero. + pub columns: u32, /// Specify the maximum number of info/debug log files to be kept. pub keep_log_file_num: i32, } @@ -174,23 +178,24 @@ pub struct DatabaseConfig { impl DatabaseConfig { /// Create new `DatabaseConfig` with default parameters and specified set of columns. /// Note that cache sizes must be explicitly set. - pub fn with_columns(columns: Option) -> Self { + /// + /// # Safety + /// + /// The number of `columns` must not be zero. + pub fn with_columns(columns: u32) -> Self { + assert!(columns > 0, "the number of columns must not be zero"); + Self { columns, ..Default::default() } } /// Returns the total memory budget in bytes. pub fn memory_budget(&self) -> MiB { - match self.columns { - None => self.memory_budget.get(&None).unwrap_or(&DB_DEFAULT_MEMORY_BUDGET_MB) * MB, - Some(columns) => (0..columns) - .map(|i| self.memory_budget.get(&Some(i)).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB) - .sum(), - } + (0..self.columns).map(|i| self.memory_budget.get(&i).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB).sum() } /// Returns the memory budget of the specified column in bytes. 
fn memory_budget_for_col(&self, col: u32) -> MiB { - self.memory_budget.get(&Some(col)).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB + self.memory_budget.get(&col).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB } // Get column family configuration with the given block based options. @@ -214,7 +219,7 @@ impl Default for DatabaseConfig { max_open_files: 512, memory_budget: HashMap::new(), compaction: CompactionProfile::default(), - columns: None, + columns: 1, keep_log_file_num: 1, } } @@ -268,17 +273,6 @@ fn is_corrupted(err: &Error) -> bool { /// Generate the options for RocksDB, based on the given `DatabaseConfig`. fn generate_options(config: &DatabaseConfig) -> Options { let mut opts = Options::default(); - let columns = config.columns.unwrap_or(0); - - if columns == 0 { - let budget = config.memory_budget() / 2; - opts.set_db_write_buffer_size(budget); - // from https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#memtable - // Memtable size is controlled by the option `write_buffer_size`. - // If you increase your memtable size, be sure to also increase your L1 size! - // L1 size is controlled by the option `max_bytes_for_level_base`. - opts.set_max_bytes_for_level_base(budget as u64); - } opts.set_use_fsync(false); opts.create_if_missing(true); @@ -311,20 +305,16 @@ fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { impl Database { const CORRUPTION_FILE_NAME: &'static str = "CORRUPTED"; - /// Open database with default settings. - pub fn open_default(path: &str) -> io::Result { - Database::open(&DatabaseConfig::default(), path) - } - /// Open database file. Creates if it does not exist. + /// + /// # Safety + /// + /// The number of `config.columns` must not be zero. 
pub fn open(config: &DatabaseConfig, path: &str) -> io::Result { + assert!(config.columns > 0, "the number of columns must not be zero"); + let opts = generate_options(config); let block_opts = generate_block_based_options(config); - let columns = config.columns.unwrap_or(0); - - if config.columns.is_some() && config.memory_budget.contains_key(&None) { - warn!("Memory budget for the default column (None) is ignored if columns.is_some()"); - } // attempt database repair if it has been previously marked as corrupted let db_corrupted = Path::new(path).join(Database::CORRUPTION_FILE_NAME); @@ -334,36 +324,32 @@ impl Database { fs::remove_file(db_corrupted)?; } - let column_names: Vec<_> = (0..columns).map(|c| format!("col{}", c)).collect(); + let column_names: Vec<_> = (0..config.columns).map(|c| format!("col{}", c)).collect(); let write_opts = WriteOptions::default(); let mut read_opts = ReadOptions::default(); read_opts.set_verify_checksums(false); - let db = if config.columns.is_some() { - let cf_descriptors: Vec<_> = (0..columns) - .map(|i| ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i))) - .collect(); - - match DB::open_cf_descriptors(&opts, path, cf_descriptors) { - Err(_) => { - // retry and create CFs - match DB::open_cf(&opts, path, &[] as &[&str]) { - Ok(mut db) => { - for (i, name) in column_names.iter().enumerate() { - let _ = db - .create_cf(name, &config.column_config(&block_opts, i as u32)) - .map_err(other_io_err)?; - } - Ok(db) + let cf_descriptors: Vec<_> = (0..config.columns) + .map(|i| ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i))) + .collect(); + + let db = match DB::open_cf_descriptors(&opts, path, cf_descriptors) { + Err(_) => { + // retry and create CFs + match DB::open_cf(&opts, path, &[] as &[&str]) { + Ok(mut db) => { + for (i, name) in column_names.iter().enumerate() { + let _ = db + .create_cf(name, &config.column_config(&block_opts, i as u32)) + 
.map_err(other_io_err)?; } - err => err, + Ok(db) } + err => err, } - ok => ok, } - } else { - DB::open(&opts, path) + ok => ok, }; let db = match db { @@ -372,25 +358,21 @@ impl Database { warn!("DB corrupted: {}, attempting repair", s); DB::repair(&opts, path).map_err(other_io_err)?; - if config.columns.is_some() { - let cf_descriptors: Vec<_> = (0..columns) - .map(|i| { - ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i)) - }) - .collect(); + let cf_descriptors: Vec<_> = (0..config.columns) + .map(|i| { + ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i)) + }) + .collect(); - DB::open_cf_descriptors(&opts, path, cf_descriptors).map_err(other_io_err)? - } else { - DB::open(&opts, path).map_err(other_io_err)? - } + DB::open_cf_descriptors(&opts, path, cf_descriptors).map_err(other_io_err)? } Err(s) => return Err(other_io_err(s)), }; Ok(Database { db: RwLock::new(Some(DBAndColumns { db, column_names })), config: config.clone(), - overlay: RwLock::new((0..=columns).map(|_| HashMap::new()).collect()), - flushing: RwLock::new((0..=columns).map(|_| HashMap::new()).collect()), + overlay: RwLock::new((0..config.columns).map(|_| HashMap::new()).collect()), + flushing: RwLock::new((0..config.columns).map(|_| HashMap::new()).collect()), flushing_lock: Mutex::new(false), path: path.to_owned(), read_opts, @@ -404,25 +386,15 @@ impl Database { DBTransaction::new() } - fn to_overlay_column(col: Option) -> usize { - col.map_or(0, |c| (c + 1) as usize) - } - /// Commit transaction to database. 
pub fn write_buffered(&self, tr: DBTransaction) { let mut overlay = self.overlay.write(); let ops = tr.ops; for op in ops { match op { - DBOp::Insert { col, key, value } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::Insert(value)); - } - DBOp::Delete { col, key } => { - let c = Self::to_overlay_column(col); - overlay[c].insert(key, KeyState::Delete); - } - } + DBOp::Insert { col, key, value } => overlay[col as usize].insert(key, KeyState::Insert(value)), + DBOp::Delete { col, key } => overlay[col as usize].insert(key, KeyState::Delete), + }; } } @@ -435,24 +407,11 @@ impl Database { { for (c, column) in self.flushing.read().iter().enumerate() { for (key, state) in column.iter() { + let cf = cfs.cf(c); match *state { - KeyState::Delete => { - if c > 0 { - let cf = cfs.cf(c - 1); - batch.delete_cf(cf, key).map_err(other_io_err)?; - } else { - batch.delete(key).map_err(other_io_err)?; - } - } - KeyState::Insert(ref value) => { - if c > 0 { - let cf = cfs.cf(c - 1); - batch.put_cf(cf, key, value).map_err(other_io_err)?; - } else { - batch.put(key, value).map_err(other_io_err)?; - } - } - } + KeyState::Delete => batch.delete_cf(cf, key).map_err(other_io_err)?, + KeyState::Insert(ref value) => batch.put_cf(cf, key, value).map_err(other_io_err)?, + }; } } } @@ -492,18 +451,14 @@ impl Database { let ops = tr.ops; for op in ops { // remove any buffered operation for this key - self.overlay.write()[Self::to_overlay_column(op.col())].remove(op.key()); + self.overlay.write()[op.col() as usize].remove(op.key()); + + let cf = cfs.cf(op.col() as usize); match op { - DBOp::Insert { col, key, value } => match col { - None => batch.put(&key, &value).map_err(other_io_err)?, - Some(c) => batch.put_cf(cfs.cf(c as usize), &key, &value).map_err(other_io_err)?, - }, - DBOp::Delete { col, key } => match col { - None => batch.delete(&key).map_err(other_io_err)?, - Some(c) => batch.delete_cf(cfs.cf(c as usize), &key).map_err(other_io_err)?, - }, - } + 
DBOp::Insert { col: _, key, value } => batch.put_cf(cf, &key, &value).map_err(other_io_err)?, + DBOp::Delete { col: _, key } => batch.delete_cf(cf, &key).map_err(other_io_err)?, + }; } check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) @@ -513,31 +468,22 @@ impl Database { } /// Get value by key. - pub fn get(&self, col: Option, key: &[u8]) -> io::Result> { + pub fn get(&self, col: u32, key: &[u8]) -> io::Result> { match *self.db.read() { Some(ref cfs) => { - let overlay = &self.overlay.read()[Self::to_overlay_column(col)]; + let overlay = &self.overlay.read()[col as usize]; match overlay.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), Some(&KeyState::Delete) => Ok(None), None => { - let flushing = &self.flushing.read()[Self::to_overlay_column(col)]; + let flushing = &self.flushing.read()[col as usize]; match flushing.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), Some(&KeyState::Delete) => Ok(None), - None => col - .map_or_else( - || { - cfs.db - .get_pinned_opt(key, &self.read_opts) - .map(|r| r.map(|v| DBValue::from_slice(&v))) - }, - |c| { - cfs.db - .get_pinned_cf_opt(cfs.cf(c as usize), key, &self.read_opts) - .map(|r| r.map(|v| DBValue::from_slice(&v))) - }, - ) + None => cfs + .db + .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) + .map(|r| r.map(|v| DBValue::from_slice(&v))) .map_err(other_io_err), } } @@ -549,19 +495,18 @@ impl Database { /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. // TODO: support prefix seek for unflushed data - pub fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + pub fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { self.iter_from_prefix(col, prefix).next().map(|(_, v)| v) } /// Get database iterator for flushed data. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. 
- pub fn iter<'a>(&'a self, col: Option) -> impl Iterator + 'a { + pub fn iter<'a>(&'a self, col: u32) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { - let c = Self::to_overlay_column(col); let overlay_data = { - let overlay = &self.overlay.read()[c]; + let overlay = &self.overlay.read()[col as usize]; let mut overlay_data = overlay .iter() .filter_map(|(k, v)| match *v { @@ -582,14 +527,11 @@ impl Database { }; optional.into_iter().flat_map(identity) } + /// Get database iterator from prefix for flushed data. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. - fn iter_from_prefix<'a>( - &'a self, - col: Option, - prefix: &'a [u8], - ) -> impl Iterator + 'a { + fn iter_from_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { let guarded = iter::ReadGuardedIterator::new_from_prefix(read_lock, col, prefix); @@ -686,11 +628,11 @@ impl Database { // duplicate declaration of methods here to avoid trait import in certain existing cases // at time of addition. 
impl KeyValueDB for Database { - fn get(&self, col: Option, key: &[u8]) -> io::Result> { + fn get(&self, col: u32, key: &[u8]) -> io::Result> { Database::get(self, col, key) } - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { Database::get_by_prefix(self, col, prefix) } @@ -706,16 +648,12 @@ impl KeyValueDB for Database { Database::flush(self) } - fn iter<'a>(&'a self, col: Option) -> Box + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box + 'a> { let unboxed = Database::iter(self, col); Box::new(unboxed.into_iter()) } - fn iter_from_prefix<'a>( - &'a self, - col: Option, - prefix: &'a [u8], - ) -> Box + 'a> { + fn iter_from_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> Box + 'a> { let unboxed = Database::iter_from_prefix(self, col, prefix); Box::new(unboxed.into_iter()) } @@ -743,6 +681,7 @@ mod tests { fn test_db(config: &DatabaseConfig) { let tempdir = TempDir::new("").unwrap(); let db = Database::open(config, tempdir.path().to_str().unwrap()).unwrap(); + let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let key3 = H256::from_str("04c00000000b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); @@ -750,64 +689,65 @@ mod tests { let key5 = H256::from_str("04c02222220b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let mut batch = db.transaction(); - batch.put(None, key1.as_bytes(), b"cat"); - batch.put(None, key2.as_bytes(), b"dog"); - batch.put(None, key3.as_bytes(), b"caterpillar"); - batch.put(None, key4.as_bytes(), b"beef"); - batch.put(None, key5.as_bytes(), b"fish"); + batch.put(0, key1.as_bytes(), b"cat"); + batch.put(0, key2.as_bytes(), b"dog"); + batch.put(0, key3.as_bytes(), b"caterpillar"); + batch.put(0, key4.as_bytes(), b"beef"); + batch.put(0, key5.as_bytes(), b"fish"); 
db.write(batch).unwrap(); - assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"cat"); + assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"cat"); - let contents: Vec<_> = db.iter(None).into_iter().collect(); + let contents: Vec<_> = db.iter(0).into_iter().collect(); assert_eq!(contents.len(), 5); assert_eq!(&*contents[0].0, key1.as_bytes()); assert_eq!(&*contents[0].1, b"cat"); assert_eq!(&*contents[1].0, key2.as_bytes()); assert_eq!(&*contents[1].1, b"dog"); - let mut prefix_iter = db.iter_from_prefix(None, &[0x04, 0xc0]); + let mut prefix_iter = db.iter_from_prefix(0, &[0x04, 0xc0]); assert_eq!(*prefix_iter.next().unwrap().1, b"caterpillar"[..]); assert_eq!(*prefix_iter.next().unwrap().1, b"beef"[..]); assert_eq!(*prefix_iter.next().unwrap().1, b"fish"[..]); let mut batch = db.transaction(); - batch.delete(None, key1.as_bytes()); + batch.delete(0, key1.as_bytes()); db.write(batch).unwrap(); - assert!(db.get(None, key1.as_bytes()).unwrap().is_none()); + assert!(db.get(0, key1.as_bytes()).unwrap().is_none()); let mut batch = db.transaction(); - batch.put(None, key1.as_bytes(), b"cat"); + batch.put(0, key1.as_bytes(), b"cat"); db.write(batch).unwrap(); let mut transaction = db.transaction(); - transaction.put(None, key3.as_bytes(), b"elephant"); - transaction.delete(None, key1.as_bytes()); + transaction.put(0, key3.as_bytes(), b"elephant"); + transaction.delete(0, key1.as_bytes()); db.write(transaction).unwrap(); - assert!(db.get(None, key1.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(None, key3.as_bytes()).unwrap().unwrap(), b"elephant"); + assert!(db.get(0, key1.as_bytes()).unwrap().is_none()); + assert_eq!(&*db.get(0, key3.as_bytes()).unwrap().unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(None, key3.as_bytes()).unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(None, key2.as_bytes()).unwrap(), b"dog"); + assert_eq!(&*db.get_by_prefix(0, key3.as_bytes()).unwrap(), b"elephant"); + assert_eq!(&*db.get_by_prefix(0, 
key2.as_bytes()).unwrap(), b"dog"); let mut transaction = db.transaction(); - transaction.put(None, key1.as_bytes(), b"horse"); - transaction.delete(None, key3.as_bytes()); + transaction.put(0, key1.as_bytes(), b"horse"); + transaction.delete(0, key3.as_bytes()); db.write_buffered(transaction); - assert!(db.get(None, key3.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"horse"); + assert!(db.get(0, key3.as_bytes()).unwrap().is_none()); + assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"horse"); db.flush().unwrap(); - assert!(db.get(None, key3.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(None, key1.as_bytes()).unwrap().unwrap(), b"horse"); + assert!(db.get(0, key3.as_bytes()).unwrap().is_none()); + assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"horse"); } #[test] fn kvdb() { let tempdir = TempDir::new("").unwrap(); - let _ = Database::open_default(tempdir.path().to_str().unwrap()).unwrap(); - test_db(&DatabaseConfig::default()); + let config = DatabaseConfig::default(); + let _ = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + test_db(&config); } #[test] @@ -826,19 +766,32 @@ mod tests { assert_eq!(rotational_from_df_output(example_df), expected_output); } + #[test] + #[should_panic] + fn db_config_with_zero_columns() { + let _cfg = DatabaseConfig::with_columns(0); + } + + #[test] + #[should_panic] + fn open_db_with_zero_columns() { + let cfg = DatabaseConfig { columns: 0, ..Default::default() }; + let _db = Database::open(&cfg, ""); + } + #[test] fn add_columns() { - let config = DatabaseConfig::default(); - let config_5 = DatabaseConfig::with_columns(Some(5)); + let config_1 = DatabaseConfig::default(); + let config_5 = DatabaseConfig::with_columns(5); let tempdir = TempDir::new("").unwrap(); - // open empty, add 5. + // open 1, add 4. 
{ - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 0); + let db = Database::open(&config_1, tempdir.path().to_str().unwrap()).unwrap(); + assert_eq!(db.num_columns(), 1); - for i in 1..=5 { + for i in 2..=5 { db.add_column().unwrap(); assert_eq!(db.num_columns(), i); } @@ -853,33 +806,33 @@ mod tests { #[test] fn remove_columns() { - let config = DatabaseConfig::default(); - let config_5 = DatabaseConfig::with_columns(Some(5)); + let config_1 = DatabaseConfig::default(); + let config_5 = DatabaseConfig::with_columns(5); let tempdir = TempDir::new("drop_columns").unwrap(); - // open 5, remove all. + // open 5, remove 4. { let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).expect("open with 5 columns"); assert_eq!(db.num_columns(), 5); - for i in (0..5).rev() { + for i in (1..5).rev() { db.remove_last_column().unwrap(); assert_eq!(db.num_columns(), i); } } - // reopen as 0. + // reopen as 1. { - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - assert_eq!(db.num_columns(), 0); + let db = Database::open(&config_1, tempdir.path().to_str().unwrap()).unwrap(); + assert_eq!(db.num_columns(), 1); } } #[test] fn test_iter_by_prefix() { let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::default(); + let config = DatabaseConfig::with_columns(1); let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); let key1 = b"0"; @@ -888,14 +841,14 @@ mod tests { let key4 = b"abcd"; let mut batch = db.transaction(); - batch.put(None, key1, key1); - batch.put(None, key2, key2); - batch.put(None, key3, key3); - batch.put(None, key4, key4); + batch.put(0, key1, key1); + batch.put(0, key2, key2); + batch.put(0, key3, key3); + batch.put(0, key4, key4); db.write(batch).unwrap(); // empty prefix - let contents: Vec<_> = db.iter_from_prefix(None, b"").into_iter().collect(); + let contents: Vec<_> = db.iter_from_prefix(0, b"").into_iter().collect(); 
assert_eq!(contents.len(), 4); assert_eq!(&*contents[0].0, key1); assert_eq!(&*contents[1].0, key2); @@ -903,24 +856,24 @@ mod tests { assert_eq!(&*contents[3].0, key4); // prefix a - let contents: Vec<_> = db.iter_from_prefix(None, b"a").into_iter().collect(); + let contents: Vec<_> = db.iter_from_prefix(0, b"a").into_iter().collect(); assert_eq!(contents.len(), 3); assert_eq!(&*contents[0].0, key2); assert_eq!(&*contents[1].0, key3); assert_eq!(&*contents[2].0, key4); // prefix abc - let contents: Vec<_> = db.iter_from_prefix(None, b"abc").into_iter().collect(); + let contents: Vec<_> = db.iter_from_prefix(0, b"abc").into_iter().collect(); assert_eq!(contents.len(), 2); assert_eq!(&*contents[0].0, key3); assert_eq!(&*contents[1].0, key4); // prefix abcde - let contents: Vec<_> = db.iter_from_prefix(None, b"abcde").into_iter().collect(); + let contents: Vec<_> = db.iter_from_prefix(0, b"abcde").into_iter().collect(); assert_eq!(contents.len(), 0); // prefix 0 - let contents: Vec<_> = db.iter_from_prefix(None, b"0").into_iter().collect(); + let contents: Vec<_> = db.iter_from_prefix(0, b"0").into_iter().collect(); assert_eq!(contents.len(), 1); assert_eq!(&*contents[0].0, key1); } @@ -928,25 +881,25 @@ mod tests { #[test] fn write_clears_buffered_ops() { let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::default(); + let config = DatabaseConfig::with_columns(1); let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); let mut batch = db.transaction(); - batch.put(None, b"foo", b"bar"); + batch.put(0, b"foo", b"bar"); db.write_buffered(batch); let mut batch = db.transaction(); - batch.put(None, b"foo", b"baz"); + batch.put(0, b"foo", b"baz"); db.write(batch).unwrap(); - assert_eq!(db.get(None, b"foo").unwrap().unwrap().as_ref(), b"baz"); + assert_eq!(db.get(0, b"foo").unwrap().unwrap().as_ref(), b"baz"); } #[test] fn default_memory_budget() { let c = DatabaseConfig::default(); - assert_eq!(c.columns, None); - 
assert_eq!(c.memory_budget(), DB_DEFAULT_MEMORY_BUDGET_MB * MB, "total memory budget is default"); + assert_eq!(c.columns, 1); + assert_eq!(c.memory_budget(), DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, "total memory budget is default"); assert_eq!( c.memory_budget_for_col(0), DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB * MB, @@ -961,19 +914,19 @@ mod tests { #[test] fn memory_budget() { - let mut c = DatabaseConfig::with_columns(Some(3)); - c.memory_budget = [(0, 10), (1, 15), (2, 20)].iter().cloned().map(|(c, b)| (Some(c), b)).collect(); + let mut c = DatabaseConfig::with_columns(3); + c.memory_budget = [(0, 10), (1, 15), (2, 20)].iter().cloned().collect(); assert_eq!(c.memory_budget(), 45 * MB, "total budget is the sum of the column budget"); } #[test] fn rocksdb_settings() { const NUM_COLS: usize = 2; - let mut cfg = DatabaseConfig::with_columns(Some(NUM_COLS as u32)); + let mut cfg = DatabaseConfig::with_columns(NUM_COLS as u32); cfg.max_open_files = 123; // is capped by the OS fd limit (typically 1024) cfg.compaction.block_size = 323232; cfg.compaction.initial_file_size = 102030; - cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().map(|(c, b)| (Some(c), b)).collect(); + cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().collect(); let db_path = TempDir::new("config_test").expect("the OS can create tmp dirs"); let _db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 437f1ba3e..4e9aed06e 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,6 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Changed +- Default column support removed from the API + - Column argument type changed from `Option` to `u32` + - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. 
## [0.1.1] - 2019-10-24 ### Dependencies diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs index 99b06569e..028b60a0a 100644 --- a/kvdb-web/src/indexed_db.rs +++ b/kvdb-web/src/indexed_db.rs @@ -28,7 +28,7 @@ use kvdb::{DBOp, DBTransaction}; use log::{debug, warn}; use std::ops::Deref; -use crate::{error::Error, Column}; +use crate::error::Error; pub struct IndexedDB { pub version: u32, @@ -85,13 +85,9 @@ fn store_name(num: u32) -> String { format!("col{}", num) } -fn column_to_number(column: Column) -> u32 { - column.map(|c| c + 1).unwrap_or_default() -} - // Returns js objects representing store names for each column fn store_names_js(columns: u32) -> Array { - let column_names = (0..=columns).map(store_name); + let column_names = (0..columns).map(store_name); let js_array = Array::new(); for name in column_names { @@ -136,7 +132,7 @@ pub fn idb_commit_transaction(idb: &IdbDatabase, txn: &DBTransaction, columns: u .expect("The provided mode and store names are valid; qed"); // Open object stores (columns) - let object_stores = (0..=columns) + let object_stores = (0..columns) .map(|n| { idb_txn .object_store(store_name(n).as_str()) @@ -147,8 +143,7 @@ pub fn idb_commit_transaction(idb: &IdbDatabase, txn: &DBTransaction, columns: u for op in &txn.ops { match op { DBOp::Insert { col, key, value } => { - let column = column_to_number(*col) as usize; - + let column = *col as usize; // Convert rust bytes to js arrays let key_js = Uint8Array::from(key.as_ref()); let val_js = Uint8Array::from(value.as_ref()); @@ -160,8 +155,7 @@ pub fn idb_commit_transaction(idb: &IdbDatabase, txn: &DBTransaction, columns: u } } DBOp::Delete { col, key } => { - let column = column_to_number(*col) as usize; - + let column = *col as usize; // Convert rust bytes to js arrays let key_js = Uint8Array::from(key.as_ref()); diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index 946057830..232689968 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -45,13 
+45,6 @@ pub struct Database { indexed_db: SendWrapper, } -// The default column is represented as `None`. -type Column = Option; - -fn number_to_column(col: u32) -> Column { - col.checked_sub(1) -} - impl Database { /// Opens the database with the given name, /// and the specified number of columns (not including the default one). @@ -76,10 +69,9 @@ impl Database { let indexed_db::IndexedDB { version, inner, .. } = db; let in_memory = in_memory::create(columns); // read the columns from the IndexedDB - for n in 0..=columns { - let column = number_to_column(n); + for column in 0..columns { let mut txn = DBTransaction::new(); - let mut stream = indexed_db::idb_cursor(&*inner, n); + let mut stream = indexed_db::idb_cursor(&*inner, column); while let Some((key, value)) = stream.next().await { txn.put_vec(column, key.as_ref(), value); } @@ -107,11 +99,11 @@ impl Drop for Database { } impl KeyValueDB for Database { - fn get(&self, col: Option, key: &[u8]) -> io::Result> { + fn get(&self, col: u32, key: &[u8]) -> io::Result> { self.in_memory.get(col, key) } - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { self.in_memory.get_by_prefix(col, prefix) } @@ -125,14 +117,14 @@ impl KeyValueDB for Database { } // NOTE: clones the whole db - fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a> { self.in_memory.iter(col) } // NOTE: clones the whole db fn iter_from_prefix<'a>( &'a self, - col: Option, + col: u32, prefix: &'a [u8], ) -> Box, Box<[u8]>)> + 'a> { self.in_memory.iter_from_prefix(col, prefix) diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index 2a9ddc14e..e3d47e45b 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -36,10 +36,10 @@ async fn reopen_the_database_with_more_columns() { // Write a value into the database let mut batch = db.transaction(); - batch.put(None, 
b"hello", b"world"); + batch.put(0, b"hello", b"world"); db.write_buffered(batch); - assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert_eq!(db.get(0, b"hello").unwrap().unwrap().as_ref(), b"world"); // Check the database version assert_eq!(db.version(), 1); @@ -51,8 +51,8 @@ async fn reopen_the_database_with_more_columns() { let db = open_db(3).await; // The value should still be present - assert_eq!(db.get(None, b"hello").unwrap().unwrap().as_ref(), b"world"); - assert!(db.get(None, b"trash").unwrap().is_none()); + assert_eq!(db.get(0, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert!(db.get(0, b"trash").unwrap().is_none()); // The version should be bumped assert_eq!(db.version(), 2); diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 565fdccf3..6aeb26f41 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,6 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Changed +- Default column support removed from the API + - Column argument type changed from `Option` to `u32` + - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. ## [0.1.1] - 2019-10-24 ### Dependencies diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 46c53e5f0..de8de5dda 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -38,8 +38,8 @@ pub struct DBTransaction { /// Database operation. #[derive(Clone, PartialEq)] pub enum DBOp { - Insert { col: Option, key: ElasticArray32, value: DBValue }, - Delete { col: Option, key: ElasticArray32 }, + Insert { col: u32, key: ElasticArray32, value: DBValue }, + Delete { col: u32, key: ElasticArray32 }, } impl DBOp { @@ -52,7 +52,7 @@ impl DBOp { } /// Returns the column associated with this operation. - pub fn col(&self) -> Option { + pub fn col(&self) -> u32 { match *self { DBOp::Insert { col, .. } => col, DBOp::Delete { col, .. 
} => col, @@ -72,24 +72,24 @@ impl DBTransaction { } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. - pub fn put(&mut self, col: Option, key: &[u8], value: &[u8]) { + pub fn put(&mut self, col: u32, key: &[u8], value: &[u8]) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.push(DBOp::Insert { col: col, key: ekey, value: DBValue::from_slice(value) }); + self.ops.push(DBOp::Insert { col, key: ekey, value: DBValue::from_slice(value) }); } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. - pub fn put_vec(&mut self, col: Option, key: &[u8], value: Bytes) { + pub fn put_vec(&mut self, col: u32, key: &[u8], value: Bytes) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.push(DBOp::Insert { col: col, key: ekey, value: DBValue::from_vec(value) }); + self.ops.push(DBOp::Insert { col, key: ekey, value: DBValue::from_vec(value) }); } /// Delete value by key. - pub fn delete(&mut self, col: Option, key: &[u8]) { + pub fn delete(&mut self, col: u32, key: &[u8]) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.push(DBOp::Delete { col: col, key: ekey }); + self.ops.push(DBOp::Delete { col, key: ekey }); } } @@ -118,10 +118,10 @@ pub trait KeyValueDB: Sync + Send { } /// Get a value by key. - fn get(&self, col: Option, key: &[u8]) -> io::Result>; + fn get(&self, col: u32, key: &[u8]) -> io::Result>; /// Get a value by partial key. Only works for flushed data. - fn get_by_prefix(&self, col: Option, prefix: &[u8]) -> Option>; + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option>; /// Write a transaction of changes to the buffer. fn write_buffered(&self, transaction: DBTransaction); @@ -136,12 +136,12 @@ pub trait KeyValueDB: Sync + Send { fn flush(&self) -> io::Result<()>; /// Iterate over flushed data for a given column. 
- fn iter<'a>(&'a self, col: Option) -> Box, Box<[u8]>)> + 'a>; + fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a>; /// Iterate over flushed data for a given column, starting from a given prefix. fn iter_from_prefix<'a>( &'a self, - col: Option, + col: u32, prefix: &'a [u8], ) -> Box, Box<[u8]>)> + 'a>; From 83c9ccdaa7c598a8afb2fe0baa76f6270e2b4637 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet Date: Fri, 13 Dec 2019 18:56:42 +0100 Subject: [PATCH 043/359] Fix a typo in error rlp error message (#283) --- rlp/src/stream.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 581f5efbe..6dcf1500e 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -317,7 +317,7 @@ impl RlpStream { Some(ref mut x) => { x.current += inserted_items; match x.max { - Some(ref max) if x.current > *max => panic!("You cannot append more items then you expect!"), + Some(ref max) if x.current > *max => panic!("You cannot append more items than you expect!"), Some(ref max) => x.current == *max, _ => false, } From df75029ce94913baf2f9fc23e451d7e12e37bf96 Mon Sep 17 00:00:00 2001 From: David Date: Tue, 17 Dec 2019 16:58:43 +0100 Subject: [PATCH 044/359] Update benchmark code to latest changes on master (#284) * Update benchmark code to latest changes on master * Check benches in CI * Try checking benches on nightly only --- .travis.yml | 2 ++ kvdb-rocksdb/benches/bench_read_perf.rs | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index ff03ac379..3b9bbf6e6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,6 +12,8 @@ matrix: rust: beta - os: linux rust: nightly + script: + - cargo check --all --benches - os: osx osx_image: xcode11.2 addons: diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs index 350fdf2eb..3b4cde3cf 100644 --- a/kvdb-rocksdb/benches/bench_read_perf.rs +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -48,7 
+48,7 @@ criterion_main!(benches); /// family and default options. Needs manual cleanup. fn open_db() -> Database { let tempdir_str = "./benches/_rocksdb_bench_get"; - let cfg = DatabaseConfig::with_columns(Some(1)); + let cfg = DatabaseConfig::with_columns(1); let db = Database::open(&cfg, tempdir_str).expect("rocksdb works"); db } @@ -81,7 +81,7 @@ fn populate(db: &Database) -> io::Result> { } } // In ethereum keys are mostly 32 bytes and payloads ~140bytes. - batch.put(Some(0), &key.as_bytes(), &n_random_bytes(140)); + batch.put(0, &key.as_bytes(), &n_random_bytes(140)); } db.write(batch)?; // Clear the overlay @@ -106,7 +106,7 @@ fn get(c: &mut Criterion) { for _ in 0..iterations { // This has no measurable impact on performance (~30ns) let needle = needles.choose(&mut rand::thread_rng()).expect("needles is not empty"); - black_box(db.get(Some(0), needle.as_bytes()).unwrap()); + black_box(db.get(0, needle.as_bytes()).unwrap()); } elapsed = start.elapsed(); }); @@ -135,7 +135,7 @@ fn get(c: &mut Criterion) { for _ in 0..iterations { // This has no measurable impact on performance (~30ns) let needle = needles.choose(&mut rand::thread_rng()).expect("needles is not empty"); - black_box(db.get_by_prefix(Some(0), &needle.as_bytes()[..8]).unwrap()); + black_box(db.get_by_prefix(0, &needle.as_bytes()[..8]).unwrap()); } elapsed = start.elapsed(); }); @@ -166,7 +166,7 @@ fn iter(c: &mut Criterion) { let (alloc_stats, _) = count_alloc(|| { let start = Instant::now(); for _ in 0..iterations { - black_box(db.iter(Some(0)).take(1000).collect::>()); + black_box(db.iter(0).take(1000).collect::>()); } elapsed = start.elapsed(); }); @@ -193,7 +193,7 @@ fn iter(c: &mut Criterion) { let (alloc_stats, _) = count_alloc(|| { let start = Instant::now(); for _ in 0..iterations { - black_box(db.iter(Some(0)).next().unwrap()); + black_box(db.iter(0).next().unwrap()); } elapsed = start.elapsed(); }); From ae7abe27fee169ede60f0ab97d8c18f3aa6dffd6 Mon Sep 17 00:00:00 2001 From: David Date: 
Thu, 19 Dec 2019 12:15:30 +0100 Subject: [PATCH 045/359] Replace ElasticArray with SmallVec (#282) * Replace ElasticArray with SmallVec * Resolve todos * Fix formatting * Attempt a sane impl of MallocSizeOf for SmallVec * remove debug code * More tests for MallocSizeOf impl for SmallVec * Include shallow size of the SmallVec when it's spilled Annotate tests * Update CHANGELOGs * Attempt to fix windows build * Attempt to fix allocator differences * One more attempt * One more try * Maybe both 24 and 72 are correct * getting there? * Assert the allocator does *something* when the SmallVec spills * formatting * Update kvdb-rocksdb/CHANGELOG.md Co-Authored-By: Andronik Ordian * address review grumbles --- kvdb-memorydb/CHANGELOG.md | 1 + kvdb-memorydb/src/lib.rs | 4 +- kvdb-rocksdb/CHANGELOG.md | 2 + kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/src/lib.rs | 13 +++-- kvdb-web/tests/indexed_db.rs | 4 +- kvdb/CHANGELOG.md | 4 +- kvdb/Cargo.toml | 2 +- kvdb/src/lib.rs | 22 ++++----- parity-util-mem/CHANGELOG.md | 1 + parity-util-mem/Cargo.toml | 6 +-- parity-util-mem/src/impls.rs | 93 +++++++++++++++++++++++++++--------- 12 files changed, 102 insertions(+), 52 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 8e4f2a3b5..fa067272d 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] ### Fixed - `iter_from_prefix` behaviour synced with the `kvdb-rocksdb` + ### Changed - Default column support removed from the API - Column argument type changed from `Option` to `u32` diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 666c10cec..9643861f9 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -86,7 +86,7 @@ impl KeyValueDB for InMemory { match self.columns.read().get(&col) { Some(map) => Box::new( // TODO: worth optimizing at all? 
- map.clone().into_iter().map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())), + map.clone().into_iter().map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())), ), None => Box::new(None.into_iter()), } @@ -102,7 +102,7 @@ impl KeyValueDB for InMemory { map.clone() .into_iter() .filter(move |&(ref k, _)| k.starts_with(prefix)) - .map(|(k, v)| (k.into_boxed_slice(), v.into_vec().into_boxed_slice())), + .map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())), ), None => Box::new(None.into_iter()), } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 632d6ff24..e7b5e4553 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -16,6 +16,8 @@ The format is based on [Keep a Changelog]. - `DatabaseConfig::default()` defaults to 1 column - `Database::with_columns` still accepts `u32`, but panics if `0` is provided - `Database::open` panics if configuration with 0 columns is provided +### Breaking +- Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. 
(See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.2.0] - 2019-11-28 - Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb) (see [PR #257](https://github.com/paritytech/parity-common/pull/257) for details) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 2049da4ba..b4954ae05 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -12,7 +12,7 @@ name = "bench_read_perf" harness = false [dependencies] -elastic-array = "0.10.2" +smallvec = "1.0.0" fs-swap = "0.2.4" interleaved-ordered = "0.1.1" kvdb = { path = "../kvdb", version = "0.1" } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 760b9177e..dc7653f6d 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -24,10 +24,9 @@ use rocksdb::{ }; use crate::iter::KeyValuePair; -use elastic_array::ElasticArray32; use fs_swap::{swap, swap_nonatomic}; use interleaved_ordered::interleave_ordered; -use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; +use kvdb::{DBKey, DBOp, DBTransaction, DBValue, KeyValueDB}; use log::{debug, warn}; #[cfg(target_os = "linux")] @@ -245,9 +244,9 @@ pub struct Database { read_opts: ReadOptions, block_opts: BlockBasedOptions, // Dirty values added with `write_buffered`. Cleaned on `flush`. - overlay: RwLock, KeyState>>>, + overlay: RwLock>>, // Values currently being flushed. Cleared when `flush` completes. - flushing: RwLock, KeyState>>>, + flushing: RwLock>>, // Prevents concurrent flushes. // Value indicates if a flush is in progress. 
flushing_lock: Mutex, @@ -483,7 +482,7 @@ impl Database { None => cfs .db .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) - .map(|r| r.map(|v| DBValue::from_slice(&v))) + .map(|r| r.map(|v| v.to_vec())) .map_err(other_io_err), } } @@ -511,7 +510,7 @@ impl Database { .iter() .filter_map(|(k, v)| match *v { KeyState::Insert(ref value) => { - Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_vec().into_boxed_slice())) + Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_boxed_slice())) } KeyState::Delete => None, }) @@ -892,7 +891,7 @@ mod tests { batch.put(0, b"foo", b"baz"); db.write(batch).unwrap(); - assert_eq!(db.get(0, b"foo").unwrap().unwrap().as_ref(), b"baz"); + assert_eq!(db.get(0, b"foo").unwrap().unwrap(), b"baz"); } #[test] diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index e3d47e45b..9dc0556d4 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -39,7 +39,7 @@ async fn reopen_the_database_with_more_columns() { batch.put(0, b"hello", b"world"); db.write_buffered(batch); - assert_eq!(db.get(0, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert_eq!(db.get(0, b"hello").unwrap().unwrap(), b"world"); // Check the database version assert_eq!(db.version(), 1); @@ -51,7 +51,7 @@ async fn reopen_the_database_with_more_columns() { let db = open_db(3).await; // The value should still be present - assert_eq!(db.get(0, b"hello").unwrap().unwrap().as_ref(), b"world"); + assert_eq!(db.get(0, b"hello").unwrap().unwrap(), b"world"); assert!(db.get(0, b"trash").unwrap().is_none()); // The version should be bumped diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 6aeb26f41..4c6a80642 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,9 +6,11 @@ The format is based on [Keep a Changelog]. 
## [Unreleased] ### Changed -- Default column support removed from the API +- [BREAKING] Default column support removed from the API - Column argument type changed from `Option` to `u32` - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. +### BREAKING +- Remove `ElasticArray` and change `DBValue` to be a type alias for `Vec` and add a `DBKey` backed by a `SmallVec`. (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.1.1] - 2019-10-24 ### Dependencies diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index db7badb98..2f8db5964 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -8,5 +8,5 @@ license = "GPL-3.0" edition = "2018" [dependencies] -elastic-array = "0.10.2" +smallvec = "1.0.0" bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index de8de5dda..67d9be1c3 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -17,7 +17,7 @@ //! Key-Value store abstraction with `RocksDB` backend. use bytes::Bytes; -use elastic_array::{ElasticArray128, ElasticArray32}; +use smallvec::SmallVec; use std::io; use std::path::Path; use std::sync::Arc; @@ -26,7 +26,9 @@ use std::sync::Arc; pub const PREFIX_LEN: usize = 12; /// Database value. -pub type DBValue = ElasticArray128; +pub type DBValue = Vec; +/// Database keys. +pub type DBKey = SmallVec<[u8; 32]>; /// Write transaction. Batches a sequence of put/delete operations for efficiency. #[derive(Default, Clone, PartialEq)] @@ -38,8 +40,8 @@ pub struct DBTransaction { /// Database operation. #[derive(Clone, PartialEq)] pub enum DBOp { - Insert { col: u32, key: ElasticArray32, value: DBValue }, - Delete { col: u32, key: ElasticArray32 }, + Insert { col: u32, key: DBKey, value: DBValue }, + Delete { col: u32, key: DBKey }, } impl DBOp { @@ -73,23 +75,17 @@ impl DBTransaction { /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. 
pub fn put(&mut self, col: u32, key: &[u8], value: &[u8]) { - let mut ekey = ElasticArray32::new(); - ekey.append_slice(key); - self.ops.push(DBOp::Insert { col, key: ekey, value: DBValue::from_slice(value) }); + self.ops.push(DBOp::Insert { col, key: DBKey::from_slice(key), value: value.to_vec() }) } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. pub fn put_vec(&mut self, col: u32, key: &[u8], value: Bytes) { - let mut ekey = ElasticArray32::new(); - ekey.append_slice(key); - self.ops.push(DBOp::Insert { col, key: ekey, value: DBValue::from_vec(value) }); + self.ops.push(DBOp::Insert { col, key: DBKey::from_slice(key), value }); } /// Delete value by key. pub fn delete(&mut self, col: u32, key: &[u8]) { - let mut ekey = ElasticArray32::new(); - ekey.append_slice(key); - self.ops.push(DBOp::Delete { col, key: ekey }); + self.ops.push(DBOp::Delete { col, key: DBKey::from_slice(key) }); } } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 8f69530b7..edb63e8fc 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- [BREAKING] Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). 
(See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.2.1] - 2019-10-24 ### Dependencies diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index cf3dab6f5..2b928dcf3 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -20,12 +20,12 @@ wee_alloc = { version = "0.4.5", optional = true } mimallocator = { version = "0.1.3", features = ["secure"], optional = true } mimalloc-sys = { version = "0.1.6", optional = true } -elastic-array = { version = "0.10.2", optional = true } +smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.9.0", optional = true } [target.'cfg(target_os = "windows")'.dependencies] -winapi = "0.3.8" +winapi = { version = "0.3.8", features = ["heapapi"] } [target.'cfg(not(target_os = "windows"))'.dependencies.jemallocator] version = "0.3.2" @@ -43,6 +43,6 @@ jemalloc-global = ["jemallocator"] # use mimalloc as global allocator mimalloc-global = ["mimallocator", "mimalloc-sys"] # implement additional types -ethereum-impls = ["ethereum-types", "elastic-array", "parking_lot"] +ethereum-impls = ["ethereum-types", "parking_lot", "smallvec"] # Full estimate: no call to allocator estimate-heapsize = [] diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs index ca36ce193..ad1962d7f 100644 --- a/parity-util-mem/src/impls.rs +++ b/parity-util-mem/src/impls.rs @@ -15,17 +15,15 @@ // along with Parity. If not, see . //! Implementation of `MallocSize` for common types : -//! - etheureum types uint and fixed hash. -//! - elastic_array arrays +//! - ethereum types uint and fixed hash. +//! - smallvec arrays of sizes 32, 36 //! 
- parking_lot mutex structures use super::{MallocSizeOf, MallocSizeOfOps}; -use elastic_array::{ - ElasticArray1024, ElasticArray128, ElasticArray16, ElasticArray2, ElasticArray2048, ElasticArray256, - ElasticArray32, ElasticArray36, ElasticArray4, ElasticArray512, ElasticArray64, ElasticArray8, -}; + use ethereum_types::{Bloom, H128, H160, H256, H264, H32, H512, H520, H64, U128, U256, U512, U64}; use parking_lot::{Mutex, RwLock}; +use smallvec::SmallVec; #[cfg(not(feature = "std"))] use core as std; @@ -36,31 +34,25 @@ malloc_size_of_is_0!(std::time::Duration); malloc_size_of_is_0!(U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom); -macro_rules! impl_elastic_array { - ($name: ident, $dummy: ident, $size: expr) => { - impl MallocSizeOf for $name +macro_rules! impl_smallvec { + ($size: expr) => { + impl MallocSizeOf for SmallVec<[T; $size]> where T: MallocSizeOf, { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self[..].size_of(ops) + let mut n = if self.spilled() { self.capacity() * core::mem::size_of::() } else { 0 }; + for elem in self.iter() { + n += elem.size_of(ops); + } + n } } }; } -impl_elastic_array!(ElasticArray2, ElasticArray2Dummy, 2); -impl_elastic_array!(ElasticArray4, ElasticArray4Dummy, 4); -impl_elastic_array!(ElasticArray8, ElasticArray8Dummy, 8); -impl_elastic_array!(ElasticArray16, ElasticArray16Dummy, 16); -impl_elastic_array!(ElasticArray32, ElasticArray32Dummy, 32); -impl_elastic_array!(ElasticArray36, ElasticArray36Dummy, 36); -impl_elastic_array!(ElasticArray64, ElasticArray64Dummy, 64); -impl_elastic_array!(ElasticArray128, ElasticArray128Dummy, 128); -impl_elastic_array!(ElasticArray256, ElasticArray256Dummy, 256); -impl_elastic_array!(ElasticArray512, ElasticArray512Dummy, 512); -impl_elastic_array!(ElasticArray1024, ElasticArray1024Dummy, 1024); -impl_elastic_array!(ElasticArray2048, ElasticArray2048Dummy, 2048); +impl_smallvec!(32); // kvdb uses this +impl_smallvec!(36); // trie-db uses this impl 
MallocSizeOf for Mutex { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { @@ -73,3 +65,60 @@ impl MallocSizeOf for RwLock { self.read().size_of(ops) } } + +#[cfg(test)] +mod tests { + use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; + use smallvec::SmallVec; + use std::mem; + impl_smallvec!(3); + + #[test] + fn test_smallvec_stack_allocated_type() { + let mut v: SmallVec<[u8; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push(1); + v.push(2); + v.push(3); + assert_eq!(v.size_of(&mut ops), 0); + assert!(!v.spilled()); + v.push(4); + assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); + assert_eq!(v.size_of(&mut ops), 4); // 4 u8s on the heap + } + + #[test] + fn test_smallvec_boxed_stack_allocated_type() { + let mut v: SmallVec<[Box; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push(Box::new(1u8)); + v.push(Box::new(2u8)); + v.push(Box::new(3u8)); + assert!(v.size_of(&mut ops) >= 3); + assert!(!v.spilled()); + v.push(Box::new(4u8)); + assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); + let mut ops = new_malloc_size_ops(); + let expected_min_allocs = mem::size_of::>() * 4 + 4; + assert!(v.size_of(&mut ops) >= expected_min_allocs); + } + + #[test] + fn test_smallvec_heap_allocated_type() { + let mut v: SmallVec<[String; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push("COW".into()); + v.push("PIG".into()); + v.push("DUCK".into()); + assert!(!v.spilled()); + assert!(v.size_of(&mut ops) >= "COW".len() + "PIG".len() + "DUCK".len()); + v.push("ÖWL".into()); + assert!(v.spilled()); + let mut ops = new_malloc_size_ops(); + let expected_min_allocs = mem::size_of::() * 4 + "ÖWL".len() + "COW".len() + "PIG".len() + "DUCK".len(); + assert!(v.size_of(&mut ops) >= 
expected_min_allocs); + } +} From 5837dff8d529076002cdbf9c53ba89a6e468dc53 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 19 Dec 2019 12:20:39 +0100 Subject: [PATCH 046/359] kvdb-rocksdb: pass ReadOptions to iterators (#277) * kvdb-rocksdb: pass ReadOptions to iterators * kvdb-rocksdb: add some iter docs * Apply David's suggestions from code review Co-Authored-By: David * kvdb-rocksdb: postprocess docs * formatting --- kvdb-rocksdb/src/iter.rs | 45 ++++++++++++++++++++++++++++++---------- kvdb-rocksdb/src/lib.rs | 8 ++++--- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index a8239bef8..4ea9a9e92 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -17,11 +17,17 @@ //! This module contains an implementation of a RocksDB iterator //! wrapped inside a `RwLock`. Since `RwLock` "owns" the inner data, //! we're using `owning_ref` to work around the borrowing rules of Rust. +//! +//! Note: this crate does not use "Prefix Seek" mode which means that the prefix iterator +//! will return keys not starting with the given prefix as well (as long as `key >= prefix`). +//! To work around this we filter the data returned by rocksdb to ensure that +//! all data yielded by the iterator does start with the given prefix. +//! See https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes for details. use crate::DBAndColumns; use owning_ref::{OwningHandle, StableAddress}; use parking_lot::RwLockReadGuard; -use rocksdb::{DBIterator, IteratorMode}; +use rocksdb::{DBIterator, Direction, IteratorMode, ReadOptions}; use std::ops::{Deref, DerefMut}; /// A tuple holding key and value data, used as the iterator item type. @@ -78,22 +84,35 @@ pub trait IterationHandler { /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number /// is passed. 
- fn iter(&self, col: u32) -> Self::Iterator; + /// In addition to a read lock and a column index, it takes a ref to the same `ReadOptions` we + /// pass to the `get` method. + fn iter(&self, col: u32, read_opts: &ReadOptions) -> Self::Iterator; /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number /// is passed. The iterator starts from the first key having the provided `prefix`. - fn iter_from_prefix(&self, col: u32, prefix: &[u8]) -> Self::Iterator; + /// In addition to a read lock and a column index, it takes a ref to the same `ReadOptions` we + /// pass to the `get` method. + fn iter_from_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator; } impl<'a, T> ReadGuardedIterator<'a, <&'a T as IterationHandler>::Iterator, T> where &'a T: IterationHandler, { - pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter(col)) } + /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, + /// where `DBIterator` iterates over all keys. + pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32, read_opts: &ReadOptions) -> Self { + Self { inner: Self::new_inner(read_lock, |db| db.iter(col, read_opts)) } } - pub fn new_from_prefix(read_lock: RwLockReadGuard<'a, Option>, col: u32, prefix: &[u8]) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter_from_prefix(col, prefix)) } + /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, + /// where `DBIterator` iterates over keys >= prefix. 
+ pub fn new_from_prefix( + read_lock: RwLockReadGuard<'a, Option>, + col: u32, + prefix: &[u8], + read_opts: &ReadOptions, + ) -> Self { + Self { inner: Self::new_inner(read_lock, |db| db.iter_from_prefix(col, prefix, read_opts)) } } fn new_inner( @@ -110,11 +129,15 @@ where impl<'a> IterationHandler for &'a DBAndColumns { type Iterator = DBIterator<'a>; - fn iter(&self, col: u32) -> Self::Iterator { - self.db.iterator_cf(self.cf(col as usize), IteratorMode::Start).expect("iterator params are valid; qed") + fn iter(&self, col: u32, read_opts: &ReadOptions) -> Self::Iterator { + self.db + .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::Start) + .expect("iterator params are valid; qed") } - fn iter_from_prefix(&self, col: u32, prefix: &[u8]) -> Self::Iterator { - self.db.prefix_iterator_cf(self.cf(col as usize), prefix).expect("iterator params are valid; qed") + fn iter_from_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator { + self.db + .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) + .expect("iterator params are valid; qed") } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index dc7653f6d..d649ed343 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -519,7 +519,7 @@ impl Database { overlay_data }; - let guarded = iter::ReadGuardedIterator::new(read_lock, col); + let guarded = iter::ReadGuardedIterator::new(read_lock, col, &self.read_opts); Some(interleave_ordered(overlay_data, guarded)) } else { None @@ -533,12 +533,14 @@ impl Database { fn iter_from_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { - let guarded = iter::ReadGuardedIterator::new_from_prefix(read_lock, col, prefix); + let guarded = iter::ReadGuardedIterator::new_from_prefix(read_lock, col, prefix, &self.read_opts); Some(interleave_ordered(Vec::new(), guarded)) } else { 
None }; - // workaround for https://github.com/facebook/rocksdb/issues/2343 + // We're not using "Prefix Seek" mode, so the iterator will return + // keys not starting with the given prefix as well, + // see https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes optional.into_iter().flat_map(identity).filter(move |(k, _)| k.starts_with(prefix)) } From 73297de9cbc9c13f0d7eec75f60cc657b6415db4 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 19 Dec 2019 14:59:12 +0100 Subject: [PATCH 047/359] [kvdb-rocksdb] adds `num_keys()` (#285) * [kvdb-rocksdb] adds `num_keys()` Uses `"rocksdb.estimate-num-keys"` to get an estimate of the number of keys in a database (see https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ#failure-recovery). * Changelog * Address review grumbles * formatting * Fix doc changes requested in previous PR * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: Andronik Ordian --- kvdb-rocksdb/CHANGELOG.md | 2 ++ kvdb-rocksdb/src/iter.rs | 15 +++++++-------- kvdb-rocksdb/src/lib.rs | 32 +++++++++++++++++++++++++++++++- 3 files changed, 40 insertions(+), 9 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index e7b5e4553..9e4006897 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -16,6 +16,8 @@ The format is based on [Keep a Changelog]. - `DatabaseConfig::default()` defaults to 1 column - `Database::with_columns` still accepts `u32`, but panics if `0` is provided - `Database::open` panics if configuration with 0 columns is provided +- Add `num_keys(col)` to get an estimate of the number of keys in a column (See [PR #285](https://github.com/paritytech/parity-common/pull/285)). + ### Breaking - Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. 
(See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 4ea9a9e92..881aa724e 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -82,15 +82,14 @@ impl<'a, I: Iterator, T> Iterator for ReadGuardedIterator<'a, I, T> { pub trait IterationHandler { type Iterator: Iterator; - /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number - /// is passed. - /// In addition to a read lock and a column index, it takes a ref to the same `ReadOptions` we - /// pass to the `get` method. + /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes a + /// reference to a `ReadOptions` to allow configuration of the new iterator (see + /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). fn iter(&self, col: u32, read_opts: &ReadOptions) -> Self::Iterator; - /// Create an `Iterator` over the default DB column or over a `ColumnFamily` if a column number - /// is passed. The iterator starts from the first key having the provided `prefix`. - /// In addition to a read lock and a column index, it takes a ref to the same `ReadOptions` we - /// pass to the `get` method. + /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes a + /// reference to a `ReadOptions` to allow configuration of the new iterator (see + /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). + /// The iterator starts from the first key having the provided `prefix`. fn iter_from_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator; } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index d649ed343..0041b3dc3 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -587,7 +587,7 @@ impl Database { Ok(()) } - /// The number of non-default column families. 
+ /// The number of column families in the db. pub fn num_columns(&self) -> u32 { self.db .read() @@ -597,6 +597,22 @@ impl Database { .unwrap_or(0) } + /// The number of keys in a column (estimated). + /// Does not take into account the unflushed data. + pub fn num_keys(&self, col: u32) -> io::Result { + const ESTIMATE_NUM_KEYS: &str = "rocksdb.estimate-num-keys"; + match *self.db.read() { + Some(ref cfs) => { + let cf = cfs.cf(col as usize); + match cfs.db.property_int_value_cf(cf, ESTIMATE_NUM_KEYS) { + Ok(estimate) => Ok(estimate.unwrap_or_default()), + Err(err_string) => Err(other_io_err(err_string)), + } + } + None => Ok(0), + } + } + /// Remove the last column family in the database. The deletion is definitive. pub fn remove_last_column(&self) -> io::Result<()> { match *self.db.write() { @@ -830,6 +846,20 @@ mod tests { } } + #[test] + fn test_num_keys() { + let tempdir = TempDir::new("").unwrap(); + let config = DatabaseConfig::with_columns(1); + let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + + assert_eq!(db.num_keys(0).unwrap(), 0, "database is empty after creation"); + let key1 = b"beef"; + let mut batch = db.transaction(); + batch.put(0, key1, key1); + db.write(batch).unwrap(); + assert_eq!(db.num_keys(0).unwrap(), 1, "adding a key increases the count"); + } + #[test] fn test_iter_by_prefix() { let tempdir = TempDir::new("").unwrap(); From 3d10274df12f95bb4679cc42c0b052622c1ae320 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 19 Dec 2019 17:05:49 +0100 Subject: [PATCH 048/359] Prepare releases (#286) * Prepare releases * Sort out cross requirements * Bump fixed-hash as well --- fixed-hash/CHANGELOG.md | 4 ++++ fixed-hash/Cargo.toml | 2 +- kvdb-memorydb/CHANGELOG.md | 4 +++- kvdb-memorydb/Cargo.toml | 4 ++-- kvdb-rocksdb/CHANGELOG.md | 6 +++--- kvdb-rocksdb/Cargo.toml | 4 ++-- kvdb-web/CHANGELOG.md | 4 +++- kvdb-web/Cargo.toml | 6 +++--- kvdb/CHANGELOG.md | 7 ++++--- kvdb/Cargo.toml | 2 +- parity-util-mem/CHANGELOG.md | 4 
+++- parity-util-mem/Cargo.toml | 2 +- triehash/CHANGELOG.md | 2 ++ 13 files changed, 32 insertions(+), 19 deletions(-) diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index 2d2170b42..ae22eee7f 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.5.2] - 2019-12-19 +### Fixed +- re-export `alloc` for both std and no-std to fix compilation (See [PR #268](https://github.com/paritytech/parity-common/pull/268)) + ## [0.5.1] - 2019-10-24 ### Dependencies - Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 8e1bcfb16..cc3d3f3a8 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.5.1" +version = "0.5.2" authors = ["Parity Technologies "] license = "MIT" homepage = "https://github.com/paritytech/parity-common" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index fa067272d..c594b4d98 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,10 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.2.0] - 2019-12-19 ### Fixed - `iter_from_prefix` behaviour synced with the `kvdb-rocksdb` ### Changed - Default column support removed from the API - Column argument type changed from `Option` to `u32` - - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. + - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. 
diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 4ee909d4a..21dbe7128 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.1.2" +version = "0.2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -9,4 +9,4 @@ edition = "2018" [dependencies] parking_lot = "0.9.0" -kvdb = { version = "0.1", path = "../kvdb" } +kvdb = { version = "0.2", path = "../kvdb" } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 9e4006897..550ff450f 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,20 +5,20 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.3.0] - 2019-12-19 - Use `get_pinned` API to save one allocation for each call to `get()` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) - Rename `drop_column` to `remove_last_column` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) - Rename `get_cf` to `cf` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) - Default column support removed from the API (See [PR #278](https://github.com/paritytech/parity-common/pull/278) for details) - Column argument type changed from `Option` to `u32` - Migration - - Column index `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. + - Column index `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. - Database must be opened with at least one column and existing DBs has to be opened with a number of columns increased by 1 to avoid having to migrate the data, e.g. before: `Some(9)`, after: `10`. 
- `DatabaseConfig::default()` defaults to 1 column - `Database::with_columns` still accepts `u32`, but panics if `0` is provided - `Database::open` panics if configuration with 0 columns is provided - Add `num_keys(col)` to get an estimate of the number of keys in a column (See [PR #285](https://github.com/paritytech/parity-common/pull/285)). - -### Breaking - Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.2.0] - 2019-11-28 diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index b4954ae05..f6387edc5 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.2.0" +version = "0.3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by rocksDB" @@ -15,7 +15,7 @@ harness = false smallvec = "1.0.0" fs-swap = "0.2.4" interleaved-ordered = "0.1.1" -kvdb = { path = "../kvdb", version = "0.1" } +kvdb = { path = "../kvdb", version = "0.2" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.9.0" diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 4e9aed06e..475d4f564 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,10 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.2.0] - 2019-12-19 ### Changed - Default column support removed from the API - Column argument type changed from `Option` to `u32` - - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. + - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. 
## [0.1.1] - 2019-10-24 ### Dependencies diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index c9157d922..e875d594b 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.1.1" +version = "0.2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,8 +11,8 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.54" js-sys = "0.3.31" -kvdb = { version = "0.1", path = "../kvdb" } -kvdb-memorydb = { version = "0.1", path = "../kvdb-memorydb" } +kvdb = { version = "0.2", path = "../kvdb" } +kvdb-memorydb = { version = "0.2", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 4c6a80642..41a94676a 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,11 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.2.0] - 2019-12-19 ### Changed -- [BREAKING] Default column support removed from the API +- Default column support removed from the API - Column argument type changed from `Option` to `u32` - - Migration `None` -> `0`, `Some(0)` -> `1`, `Some(1)` -> `2`, etc. -### BREAKING + - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. - Remove `ElasticArray` and change `DBValue` to be a type alias for `Vec` and add a `DBKey` backed by a `SmallVec`. 
(See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.1.1] - 2019-10-24 diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 2f8db5964..bb40534b5 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.1.1" +version = "0.2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index edb63e8fc..e7d58fa35 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,7 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- [BREAKING] Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) + +## [0.3.0] - 2019-12-19 +- Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.2.1] - 2019-10-24 ### Dependencies diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 2b928dcf3..8fe85da10 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.2.1" +version = "0.3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md index c8b6fd2be..320e4f84f 100644 --- a/triehash/CHANGELOG.md +++ b/triehash/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.2] - 2019-12-15 - Added no-std support (https://github.com/paritytech/parity-common/pull/280) ## [0.8.1] - 2019-10-24 - Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/214) From b0eefb73c49c50f9e11325e1fb9ee595663a321f Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Sat, 21 Dec 2019 16:55:14 +0100 Subject: [PATCH 049/359] [ci]: remove feature flags in virtual workspace (#289) For more information https://github.com/rust-lang/cargo/pull/7507/ --- appveyor.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index d930e318c..bc9c9bf75 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -18,11 +18,11 @@ install: - cargo -vV build_script: - - cargo check --tests --features "%FEATURES%" - - cargo build --all --features "%FEATURES%" + - cargo check --tests + - cargo build --all test_script: - - cargo test --all --features "%FEATURES%" --exclude uint --exclude fixed-hash + - cargo test --all --exclude uint --exclude fixed-hash - cd fixed-hash/ && cargo test --all-features && cd .. - cd uint/ && cargo test --features=std,quickcheck --release && cd .. - cd plain_hasher/ && cargo test --no-default-features && cd .. From 2b9ce2bc603e648acd013f5ea59ccbcef0c426df Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 21 Dec 2019 18:55:59 +0300 Subject: [PATCH 050/359] Update README.md (#287) --- parity-crypto/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-crypto/README.md b/parity-crypto/README.md index 51390fe4f..b7309710c 100644 --- a/parity-crypto/README.md +++ b/parity-crypto/README.md @@ -5,4 +5,4 @@ General cryptographic utilities for Ethereum. ## Changelog -The 0.4 release removes the dependency on `ring` and replaces it with prue-rust alternatives. As a consequence of this, AES GCM support has been removed. 
`subtle` is used for constant time equality testing and error handling is pared down to the bare minimum required. +The 0.4 release removes the dependency on `ring` and replaces it with pure rust alternatives. As a consequence of this, AES GCM support has been removed. `subtle` is used for constant time equality testing and error handling is pared down to the bare minimum required. From 51d1b9a231c7c32f94c6cdd854f397c14f3e7d48 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 30 Dec 2019 12:04:15 +0300 Subject: [PATCH 051/359] Update uint README (#288) * Update uint README * Update README.md --- uint/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/README.md b/uint/README.md index 6d4a30b70..557d63991 100644 --- a/uint/README.md +++ b/uint/README.md @@ -2,7 +2,7 @@ ## Description -Provides facilities to construct big unsigned integer types. +Provides facilities to construct big unsigned integer types which use no allocations (stack-based, fixed bit length). If you want to use a predefined `U128`, `U256` or `U512` type, take a look at the [`primitive-types`](https://github.com/paritytech/parity-common/tree/master/primitive-types) or [`ethereum-types`](https://github.com/paritytech/parity-common/tree/master/ethereum-types) crate. The focus on the provided big unsigned integer types is performance and cross-platform availability. 
From f71d33f1e599684076d517674ad72898ee750582 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 31 Dec 2019 16:17:04 +0300 Subject: [PATCH 052/359] Move and extend impls for locking primitives (#290) * move and impls for locking primitives * remove parking_lot * add doc comment --- parity-util-mem/Cargo.toml | 4 ++-- parity-util-mem/src/impls.rs | 14 -------------- parity-util-mem/src/malloc_size.rs | 28 ++++++++++++++++++++++++++-- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 8fe85da10..a89122723 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -33,7 +33,7 @@ optional = true [features] default = ["std", "ethereum-impls"] -std = [] +std = ["parking_lot"] # use dlmalloc as global allocator dlmalloc-global = ["dlmalloc", "estimate-heapsize"] # use wee_alloc as global allocator @@ -43,6 +43,6 @@ jemalloc-global = ["jemallocator"] # use mimalloc as global allocator mimalloc-global = ["mimallocator", "mimalloc-sys"] # implement additional types -ethereum-impls = ["ethereum-types", "parking_lot", "smallvec"] +ethereum-impls = ["ethereum-types", "smallvec"] # Full estimate: no call to allocator estimate-heapsize = [] diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs index ad1962d7f..322375873 100644 --- a/parity-util-mem/src/impls.rs +++ b/parity-util-mem/src/impls.rs @@ -17,12 +17,10 @@ //! Implementation of `MallocSize` for common types : //! - ethereum types uint and fixed hash. //! - smallvec arrays of sizes 32, 36 -//! - parking_lot mutex structures use super::{MallocSizeOf, MallocSizeOfOps}; use ethereum_types::{Bloom, H128, H160, H256, H264, H32, H512, H520, H64, U128, U256, U512, U64}; -use parking_lot::{Mutex, RwLock}; use smallvec::SmallVec; #[cfg(not(feature = "std"))] @@ -54,18 +52,6 @@ macro_rules! 
impl_smallvec { impl_smallvec!(32); // kvdb uses this impl_smallvec!(36); // trie-db uses this -impl MallocSizeOf for Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock()).size_of(ops) - } -} - -impl MallocSizeOf for RwLock { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.read().size_of(ops) - } -} - #[cfg(test)] mod tests { use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index a3018af1f..a748eec82 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -43,7 +43,8 @@ //! measured as well as the thing it points to. E.g. //! ` as MallocSizeOf>::size_of(field, ops)`. -// This file is patched at commit 5bdea7dc1c80790a852a3fb03edfb2b8fbd403dc DO NOT EDIT. +//! This is an extended (for own internal needs) version of the Servo internal malloc_size crate. +//! We should occasionally track the upstream changes/fixes and reintroduce them here, be they applicable. #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -539,10 +540,33 @@ impl MallocConditionalSizeOf for Arc { /// If a mutex is stored inside of an Arc value as a member of a data type that is being measured, /// the Arc will not be automatically measured so there is no risk of overcounting the mutex's /// contents. +/// +/// The same reasoning applies to RwLock. 
#[cfg(feature = "std")] impl MallocSizeOf for std::sync::Mutex { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - (*self.lock().unwrap()).size_of(ops) + self.lock().unwrap().size_of(ops) + } +} + +#[cfg(feature = "std")] +impl MallocSizeOf for parking_lot::Mutex { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.lock().size_of(ops) + } +} + +#[cfg(feature = "std")] +impl MallocSizeOf for std::sync::RwLock { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.read().unwrap().size_of(ops) + } +} + +#[cfg(feature = "std")] +impl MallocSizeOf for parking_lot::RwLock { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.read().size_of(ops) } } From 4612d115ae7fc8a8da476d84aa9d541c9ab9f94d Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 31 Dec 2019 19:03:30 +0300 Subject: [PATCH 053/359] Provide #[derive(MallocSizeOf)] that is actually working (#291) * add derive and remove bounds * add smoky test * add another test * add also for fixed size arrays * cargo fmt * fix tests for different archs * cargo fmt * address review * add ignore test * Update parity-util-mem/src/malloc_size.rs Co-Authored-By: David * Update parity-util-mem/src/malloc_size.rs Co-Authored-By: David * add license preamble Co-authored-by: David --- Cargo.toml | 1 + parity-util-mem/Cargo.toml | 2 +- parity-util-mem/derive/Cargo.toml | 16 ++++++ parity-util-mem/derive/lib.rs | 84 ++++++++++++++++++++++++++++++ parity-util-mem/src/impls.rs | 6 +++ parity-util-mem/src/lib.rs | 5 +- parity-util-mem/src/malloc_size.rs | 20 +++---- parity-util-mem/tests/derive.rs | 63 ++++++++++++++++++++++ 8 files changed, 179 insertions(+), 18 deletions(-) create mode 100644 parity-util-mem/derive/Cargo.toml create mode 100644 parity-util-mem/derive/lib.rs create mode 100644 parity-util-mem/tests/derive.rs diff --git a/Cargo.toml b/Cargo.toml index 6c12c3205..c2511404c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,4 +20,5 @@ members = [ "primitive-types", 
"ethereum-types", "ethbloom", + "parity-util-mem/derive" ] diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index a89122723..10a90c3cc 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -9,7 +9,6 @@ edition = "2018" [dependencies] cfg-if = "0.1.10" -malloc_size_of_derive = "0.1.1" dlmalloc = { version = "0.1.3", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } # from https://github.com/microsoft/mimalloc: @@ -19,6 +18,7 @@ wee_alloc = { version = "0.4.5", optional = true } # The performance penalty is only around 3% on average over our benchmarks. mimallocator = { version = "0.1.3", features = ["secure"], optional = true } mimalloc-sys = { version = "0.1.6", optional = true } +parity-util-mem-derive = { path = "derive", version = "0.1" } smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } diff --git a/parity-util-mem/derive/Cargo.toml b/parity-util-mem/derive/Cargo.toml new file mode 100644 index 000000000..f37d38013 --- /dev/null +++ b/parity-util-mem/derive/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "parity-util-mem-derive" +version = "0.1.0" +authors = ["Parity Technologies"] +license = "MIT" +description = "Crate for memory reporting" +repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" + +[lib] +path = "lib.rs" +proc-macro = true + +[dependencies] +proc-macro2 = "1" +syn = { version = "1", features = ["full"] } +synstructure = "0.12" diff --git a/parity-util-mem/derive/lib.rs b/parity-util-mem/derive/lib.rs new file mode 100644 index 000000000..c1c1e504e --- /dev/null +++ b/parity-util-mem/derive/lib.rs @@ -0,0 +1,84 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! A crate for deriving the MallocSizeOf trait. +//! +//! This is a copy of Servo malloc_size_of_derive code, modified to work with +//! our `parity_util_mem` library + +extern crate proc_macro2; +#[macro_use] +extern crate syn; +#[macro_use] +extern crate synstructure; + +#[cfg(not(test))] +decl_derive!([MallocSizeOf, attributes(ignore_malloc_size_of)] => malloc_size_of_derive); + +fn malloc_size_of_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { + let match_body = s.each(|binding| { + let ignore = binding.ast().attrs.iter().any(|attr| match attr.parse_meta().unwrap() { + syn::Meta::Path(ref path) | syn::Meta::List(syn::MetaList { ref path, .. }) + if path.is_ident("ignore_malloc_size_of") => + { + panic!( + "#[ignore_malloc_size_of] should have an explanation, \ + e.g. #[ignore_malloc_size_of = \"because reasons\"]" + ); + } + syn::Meta::NameValue(syn::MetaNameValue { ref path, .. }) if path.is_ident("ignore_malloc_size_of") => true, + _ => false, + }); + if ignore { + None + } else if let syn::Type::Array(..) = binding.ast().ty { + Some(quote! { + for item in #binding.iter() { + sum += parity_util_mem::MallocSizeOf::size_of(item, ops); + } + }) + } else { + Some(quote! 
{ + sum += parity_util_mem::MallocSizeOf::size_of(#binding, ops); + }) + } + }); + + let ast = s.ast(); + let name = &ast.ident; + let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); + let mut where_clause = where_clause.unwrap_or(&parse_quote!(where)).clone(); + for param in ast.generics.type_params() { + let ident = ¶m.ident; + where_clause.predicates.push(parse_quote!(#ident: parity_util_mem::MallocSizeOf)); + } + + let tokens = quote! { + impl #impl_generics parity_util_mem::MallocSizeOf for #name #ty_generics #where_clause { + #[inline] + #[allow(unused_variables, unused_mut, unreachable_code)] + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + let mut sum = 0; + match *self { + #match_body + } + sum + } + } + }; + + tokens +} diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs index 322375873..4124b132a 100644 --- a/parity-util-mem/src/impls.rs +++ b/parity-util-mem/src/impls.rs @@ -32,6 +32,12 @@ malloc_size_of_is_0!(std::time::Duration); malloc_size_of_is_0!(U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom); +malloc_size_of_is_0!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 9], [u8; 10], [u8; 11], [u8; 12], + [u8; 13], [u8; 14], [u8; 15], [u8; 16], [u8; 17], [u8; 18], [u8; 19], [u8; 20], [u8; 21], [u8; 22], [u8; 23], + [u8; 24], [u8; 25], [u8; 26], [u8; 27], [u8; 28], [u8; 29], [u8; 30], [u8; 31], [u8; 32] +); + macro_rules! impl_smallvec { ($size: expr) => { impl MallocSizeOf for SmallVec<[T; $size]> diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 43fbc4ab6..23c07e27b 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -23,8 +23,6 @@ #[cfg(not(feature = "std"))] extern crate alloc; -use malloc_size_of_derive as malloc_size_derive; - cfg_if::cfg_if! 
{ if #[cfg(all( feature = "jemalloc-global", @@ -72,7 +70,8 @@ pub mod impls; pub use allocators::MallocSizeOfExt; pub use malloc_size::{MallocSizeOf, MallocSizeOfOps}; -pub use malloc_size_derive::*; + +pub use parity_util_mem_derive::*; #[cfg(feature = "std")] #[cfg(test)] diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index a748eec82..a86823db5 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -43,8 +43,8 @@ //! measured as well as the thing it points to. E.g. //! ` as MallocSizeOf>::size_of(field, ops)`. -//! This is an extended (for own internal needs) version of the Servo internal malloc_size crate. -//! We should occasionally track the upstream changes/fixes and reintroduce them here, be they applicable. +//! This is an extended version of the Servo internal malloc_size crate. +//! We should occasionally track the upstream changes/fixes and reintroduce them here, whenever applicable. #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -426,11 +426,7 @@ where } #[cfg(feature = "std")] -impl MallocShallowSizeOf for std::collections::HashMap -where - K: Eq + Hash, - S: BuildHasher, -{ +impl MallocShallowSizeOf for std::collections::HashMap { fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { // See the implementation for std::collections::HashSet for details. 
if ops.has_malloc_enclosing_size_of() { @@ -444,9 +440,8 @@ where #[cfg(feature = "std")] impl MallocSizeOf for std::collections::HashMap where - K: Eq + Hash + MallocSizeOf, + K: MallocSizeOf, V: MallocSizeOf, - S: BuildHasher, { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); @@ -458,10 +453,7 @@ where } } -impl MallocShallowSizeOf for rstd::collections::BTreeMap -where - K: Eq + Hash, -{ +impl MallocShallowSizeOf for rstd::collections::BTreeMap { fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { if ops.has_malloc_enclosing_size_of() { self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) @@ -473,7 +465,7 @@ where impl MallocSizeOf for rstd::collections::BTreeMap where - K: Eq + Hash + MallocSizeOf, + K: MallocSizeOf, V: MallocSizeOf, { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs new file mode 100644 index 000000000..10dc6975d --- /dev/null +++ b/parity-util-mem/tests/derive.rs @@ -0,0 +1,63 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; + +#[test] +#[cfg(feature = "std")] +fn derive_vec() { + #[derive(MallocSizeOf)] + struct Trivia { + v: Vec, + } + + let t = Trivia { v: vec![0u8; 1024] }; + + assert!(t.malloc_size_of() > 1000); +} + +#[test] +#[cfg(feature = "std")] +fn derive_hashmap() { + #[derive(MallocSizeOf, Default)] + struct Trivia { + hm: std::collections::HashMap>, + } + + let mut t = Trivia::default(); + + t.hm.insert(1, vec![0u8; 2048]); + + assert!(t.malloc_size_of() > 2000); +} + +#[test] +#[cfg(feature = "std")] +fn derive_ignore() { + #[derive(MallocSizeOf, Default)] + struct Trivia { + hm: std::collections::HashMap>, + #[ignore_malloc_size_of = "I don't like vectors"] + v: Vec, + } + + let mut t = Trivia::default(); + + t.hm.insert(1, vec![0u8; 2048]); + t.v = vec![0u8; 1024]; + + assert!(t.malloc_size_of() < 3000); +} From f5dbb7824a93b760e447c182a6f84ec47c7a94fc Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 31 Dec 2019 23:31:51 +0300 Subject: [PATCH 054/359] Add memory stats for kvdb-s (#292) * add memory stats for kvdb-s * also stub for kvdb-web * fix tests for different archs * address review * a bit more sophisticated test * rebase and cargo fmt * query block-cache-usage also * use rstd * Update parity-util-mem/src/malloc_size.rs Co-Authored-By: Andronik Ordian Co-authored-by: Andronik Ordian --- kvdb-memorydb/Cargo.toml | 1 + kvdb-memorydb/src/lib.rs | 3 +- kvdb-rocksdb/Cargo.toml | 1 + kvdb-rocksdb/src/lib.rs | 56 ++++++++++++++++++++++++++++++ kvdb-web/Cargo.toml | 1 + kvdb-web/src/lib.rs | 3 ++ kvdb/Cargo.toml | 1 + kvdb/src/lib.rs | 2 +- parity-util-mem/src/malloc_size.rs | 6 ++++ parity-util-mem/tests/derive.rs | 3 +- 10 files changed, 73 insertions(+), 4 deletions(-) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 21dbe7128..1451a66b8 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -8,5 +8,6 @@ license = "GPL-3.0" edition = "2018" [dependencies] 
+parity-util-mem = { path = "../parity-util-mem", version = "0.3" } parking_lot = "0.9.0" kvdb = { version = "0.2", path = "../kvdb" } diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 9643861f9..ea0c85649 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; +use parity_util_mem::MallocSizeOf; use parking_lot::RwLock; use std::{ collections::{BTreeMap, HashMap}, @@ -23,7 +24,7 @@ use std::{ /// A key-value database fulfilling the `KeyValueDB` trait, living in memory. /// This is generally intended for tests and is not particularly optimized. -#[derive(Default)] +#[derive(Default, MallocSizeOf)] pub struct InMemory { columns: RwLock, DBValue>>>, } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index f6387edc5..695c08def 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -22,6 +22,7 @@ parking_lot = "0.9.0" regex = "1.3.1" rocksdb = { version = "0.13", features = ["snappy"], default-features = false } owning_ref = "0.4.0" +parity-util-mem = { path = "../parity-util-mem", version = "0.3" } [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 0041b3dc3..2d666d139 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -18,6 +18,7 @@ mod iter; use std::{cmp, collections::HashMap, convert::identity, error, fs, io, mem, path::Path, result}; +use parity_util_mem::MallocSizeOf; use parking_lot::{Mutex, MutexGuard, RwLock}; use rocksdb::{ BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Error, Options, ReadOptions, WriteBatch, WriteOptions, DB, @@ -57,6 +58,7 @@ pub const DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB: MiB = 128; /// The default memory budget in MiB. 
pub const DB_DEFAULT_MEMORY_BUDGET_MB: MiB = 512; +#[derive(MallocSizeOf)] enum KeyState { Insert(DBValue), Delete, @@ -229,6 +231,25 @@ struct DBAndColumns { column_names: Vec, } +fn static_property_or_warn(db: &DB, prop: &str) -> usize { + match db.property_int_value(prop) { + Ok(Some(v)) => v as usize, + _ => { + warn!("Cannot read expected static property of RocksDb database: {}", prop); + 0 + } + } +} + +impl MallocSizeOf for DBAndColumns { + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.column_names.size_of(ops) + + static_property_or_warn(&self.db, "rocksdb.estimate-table-readers-mem") + + static_property_or_warn(&self.db, "rocksdb.cur-size-all-mem-tables") + + static_property_or_warn(&self.db, "rocksdb.block-cache-usage") + } +} + impl DBAndColumns { fn cf(&self, i: usize) -> &ColumnFamily { self.db.cf_handle(&self.column_names[i]).expect("the specified column name is correct; qed") @@ -236,12 +257,17 @@ impl DBAndColumns { } /// Key-Value database. +#[derive(MallocSizeOf)] pub struct Database { db: RwLock>, + #[ignore_malloc_size_of = "insignificant"] config: DatabaseConfig, path: String, + #[ignore_malloc_size_of = "insignificant"] write_opts: WriteOptions, + #[ignore_malloc_size_of = "insignificant"] read_opts: ReadOptions, + #[ignore_malloc_size_of = "insignificant"] block_opts: BlockBasedOptions, // Dirty values added with `write_buffered`. Cleaned on `flush`. 
overlay: RwLock>>, @@ -759,6 +785,36 @@ mod tests { assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"horse"); } + #[test] + fn mem_tables_size() { + let tempdir = TempDir::new("").unwrap(); + + let config = DatabaseConfig { + max_open_files: 512, + memory_budget: HashMap::new(), + compaction: CompactionProfile::default(), + columns: 11, + keep_log_file_num: 1, + }; + + let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + + let mut batch = db.transaction(); + for i in 0u32..10000u32 { + batch.put(i / 1000 + 1, &i.to_le_bytes(), &(i * 17).to_le_bytes()); + } + db.write(batch).unwrap(); + + db.flush().unwrap(); + + { + let db = db.db.read(); + db.as_ref().map(|db| { + assert!(super::static_property_or_warn(&db.db, "rocksdb.cur-size-all-mem-tables") > 512); + }); + } + } + #[test] fn kvdb() { let tempdir = TempDir::new("").unwrap(); diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index e875d594b..fb3af40ea 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -16,6 +16,7 @@ kvdb-memorydb = { version = "0.2", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" +parity-util-mem = { path = "../parity-util-mem", version = "0.3" } [dependencies.web-sys] version = "0.3.31" diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index 232689968..f73426904 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -45,6 +45,9 @@ pub struct Database { indexed_db: SendWrapper, } +// TODO: implement when web-based implementation need memory stats +parity_util_mem::malloc_size_of_is_0!(Database); + impl Database { /// Opens the database with the given name, /// and the specified number of columns (not including the default one). 
diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index bb40534b5..56f2a7a5e 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -10,3 +10,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } +parity-util-mem = { path = "../parity-util-mem", version = "0.3" } diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 67d9be1c3..708871982 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -107,7 +107,7 @@ impl DBTransaction { /// /// The API laid out here, along with the `Sync` bound implies interior synchronization for /// implementation. -pub trait KeyValueDB: Sync + Send { +pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { /// Helper to create a new transaction. fn transaction(&self) -> DBTransaction { DBTransaction::new() diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index a86823db5..49f9c9bce 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -425,6 +425,12 @@ where } } +impl MallocSizeOf for rstd::cmp::Reverse { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + } +} + #[cfg(feature = "std")] impl MallocShallowSizeOf for std::collections::HashMap { fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs index 10dc6975d..6338e2cc8 100644 --- a/parity-util-mem/tests/derive.rs +++ b/parity-util-mem/tests/derive.rs @@ -29,8 +29,8 @@ fn derive_vec() { assert!(t.malloc_size_of() > 1000); } -#[test] #[cfg(feature = "std")] +#[test] fn derive_hashmap() { #[derive(MallocSizeOf, Default)] struct Trivia { @@ -58,6 +58,5 @@ fn derive_ignore() { t.hm.insert(1, vec![0u8; 2048]); t.v = vec![0u8; 1024]; - assert!(t.malloc_size_of() < 3000); } From e009e51a6b68ae071f3ff6126273db910d98b5ab Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 1 Jan 2020 22:42:01 +0300 Subject: [PATCH 
055/359] Add memory extensions for LRUCache, hashbrown (#293) * add hashbrown and lru cache impls * remove std constraints * Update parity-util-mem/src/malloc_size.rs Co-Authored-By: Andronik Ordian * fix warnings, remove duplicate and add feature flags * cargo fmt Co-authored-by: Andronik Ordian --- parity-util-mem/Cargo.toml | 4 ++- parity-util-mem/src/malloc_size.rs | 46 ++++++++++++++++++++++++++++++ parity-util-mem/tests/derive.rs | 22 ++++++++++++-- 3 files changed, 68 insertions(+), 4 deletions(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 10a90c3cc..e15e460c1 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -11,6 +11,8 @@ edition = "2018" cfg-if = "0.1.10" dlmalloc = { version = "0.1.3", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } +lru = { version = "0.4", optional = true } +hashbrown = { version = "0.6", optional = true } # from https://github.com/microsoft/mimalloc: # mimalloc can be built in secure mode, # adding guard pages, randomized allocation, encrypted free lists, etc. 
@@ -32,7 +34,7 @@ version = "0.3.2" optional = true [features] -default = ["std", "ethereum-impls"] +default = ["std", "ethereum-impls", "lru", "hashbrown"] std = ["parking_lot"] # use dlmalloc as global allocator dlmalloc-global = ["dlmalloc", "estimate-heapsize"] diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 49f9c9bce..3c26a917a 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -68,6 +68,7 @@ use std::sync::Arc; pub use alloc::boxed::Box; #[cfg(not(feature = "std"))] use core::ffi::c_void; +#[cfg(feature = "std")] use rstd::hash::Hash; use rstd::mem::size_of; use rstd::ops::Range; @@ -623,3 +624,48 @@ impl DerefMut for Measurable { &mut self.0 } } + +#[cfg(feature = "hashbrown")] +impl MallocShallowSizeOf for hashbrown::HashMap { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + // See the implementation for std::collections::HashSet for details. + if ops.has_malloc_enclosing_size_of() { + self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) + } else { + self.capacity() * (size_of::() + size_of::() + size_of::()) + } + } +} + +#[cfg(feature = "hashbrown")] +impl MallocSizeOf for hashbrown::HashMap +where + K: MallocSizeOf, + V: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for (k, v) in self.iter() { + n += k.size_of(ops); + n += v.size_of(ops); + } + n + } +} + +#[cfg(feature = "lru")] +impl MallocSizeOf for lru::LruCache +where + K: MallocSizeOf + rstd::cmp::Eq + rstd::hash::Hash, + V: MallocSizeOf, + S: rstd::hash::BuildHasher, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = 0; + for (k, v) in self.iter() { + n += k.size_of(ops); + n += v.size_of(ops); + } + n + } +} diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs index 6338e2cc8..34674fd5f 100644 --- a/parity-util-mem/tests/derive.rs +++ 
b/parity-util-mem/tests/derive.rs @@ -14,10 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +#![cfg(feature = "std")] + use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; #[test] -#[cfg(feature = "std")] fn derive_vec() { #[derive(MallocSizeOf)] struct Trivia { @@ -29,7 +30,6 @@ fn derive_vec() { assert!(t.malloc_size_of() > 1000); } -#[cfg(feature = "std")] #[test] fn derive_hashmap() { #[derive(MallocSizeOf, Default)] @@ -45,7 +45,6 @@ fn derive_hashmap() { } #[test] -#[cfg(feature = "std")] fn derive_ignore() { #[derive(MallocSizeOf, Default)] struct Trivia { @@ -60,3 +59,20 @@ fn derive_ignore() { t.v = vec![0u8; 1024]; assert!(t.malloc_size_of() < 3000); } + +#[test] +fn derive_morecomplex() { + #[derive(MallocSizeOf)] + struct Trivia { + hm: hashbrown::HashMap>, + cache: lru::LruCache>, + } + + let mut t = Trivia { hm: hashbrown::HashMap::new(), cache: lru::LruCache::unbounded() }; + + t.hm.insert(1, vec![0u8; 2048]); + t.cache.put(1, vec![0u8; 2048]); + t.cache.put(2, vec![0u8; 4096]); + + assert!(t.malloc_size_of() > 8000); +} From 6be1f79e77a64728cf882ed687625fdbc6355330 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 2 Jan 2020 13:14:59 +0300 Subject: [PATCH 056/359] use for_tuples (#300) --- parity-util-mem/Cargo.toml | 1 + parity-util-mem/src/malloc_size.rs | 40 ++++-------------------------- parity-util-mem/tests/derive.rs | 14 +++++++++++ 3 files changed, 20 insertions(+), 35 deletions(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index e15e460c1..ff69fc3aa 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -21,6 +21,7 @@ hashbrown = { version = "0.6", optional = true } mimallocator = { version = "0.1.3", features = ["secure"], optional = true } mimalloc-sys = { version = "0.1.6", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } +impl-trait-for-tuples = "0.1.3" smallvec = { version = "1.0.0", 
optional = true } ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 3c26a917a..b9761527f 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -261,42 +261,12 @@ impl MallocSizeOf for Box { } } -impl MallocSizeOf for () { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 0 - } -} - -impl MallocSizeOf for (T1, T2) -where - T1: MallocSizeOf, - T2: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) - } -} - -impl MallocSizeOf for (T1, T2, T3) -where - T1: MallocSizeOf, - T2: MallocSizeOf, - T3: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) - } -} - -impl MallocSizeOf for (T1, T2, T3, T4) -where - T1: MallocSizeOf, - T2: MallocSizeOf, - T3: MallocSizeOf, - T4: MallocSizeOf, -{ +#[impl_trait_for_tuples::impl_for_tuples(12)] +impl MallocSizeOf for Tuple { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) + self.1.size_of(ops) + self.2.size_of(ops) + self.3.size_of(ops) + let mut result = 0; + for_tuples!( #( result += Tuple.size_of(ops); )* ); + result } } diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs index 34674fd5f..87f8c9f50 100644 --- a/parity-util-mem/tests/derive.rs +++ b/parity-util-mem/tests/derive.rs @@ -76,3 +76,17 @@ fn derive_morecomplex() { assert!(t.malloc_size_of() > 8000); } + +#[test] +fn derive_tuple() { + #[derive(MallocSizeOf)] + struct Trivia { + tp1: (), + tp2: (Vec, Vec), + } + + let t = Trivia { tp1: (), tp2: (vec![7u8; 1024], vec![9u8; 1024]) }; + + assert!(t.malloc_size_of() > 2000); + assert!(t.malloc_size_of() < 3000); +} From fe989634cdddad97c5df231547b7977499f05374 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 2 Jan 2020 14:10:53 +0300 
Subject: [PATCH 057/359] I/O statistic for key-value databases (#294) * kvdb general io stats * rocksdb io stats gathering * add bytes stats and allow to keep gathered data * cargo fmt * address review * cargo fmt * tally_read inside * add test for delete * cargo fmt * refactor completely * alter tests * Update kvdb-rocksdb/src/stats.rs Co-Authored-By: Andronik Ordian * Update kvdb-rocksdb/src/stats.rs Co-Authored-By: Andronik Ordian * remove argument description since it is now documented enum * add stats to write_buffered * enhance io api * cargo fmt Co-authored-by: Andronik Ordian --- kvdb-rocksdb/src/lib.rs | 128 ++++++++++++++++++++++++++++++--- kvdb-rocksdb/src/stats.rs | 144 ++++++++++++++++++++++++++++++++++++++ kvdb/src/io_stats.rs | 141 +++++++++++++++++++++++++++++++++++++ kvdb/src/lib.rs | 16 ++++- 4 files changed, 419 insertions(+), 10 deletions(-) create mode 100644 kvdb-rocksdb/src/stats.rs create mode 100644 kvdb/src/io_stats.rs diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 2d666d139..cb09432dc 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . mod iter; +mod stats; use std::{cmp, collections::HashMap, convert::identity, error, fs, io, mem, path::Path, result}; @@ -271,6 +272,8 @@ pub struct Database { block_opts: BlockBasedOptions, // Dirty values added with `write_buffered`. Cleaned on `flush`. overlay: RwLock>>, + #[ignore_malloc_size_of = "insignificant"] + stats: stats::RunningDbStats, // Values currently being flushed. Cleared when `flush` completes. flushing: RwLock>>, // Prevents concurrent flushes. 
@@ -403,6 +406,7 @@ impl Database { read_opts, write_opts, block_opts, + stats: stats::RunningDbStats::new(), }) } @@ -428,20 +432,32 @@ impl Database { match *self.db.read() { Some(ref cfs) => { let mut batch = WriteBatch::default(); + let mut ops: usize = 0; + let mut bytes: usize = 0; mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write()); { for (c, column) in self.flushing.read().iter().enumerate() { + ops += column.len(); for (key, state) in column.iter() { let cf = cfs.cf(c); match *state { - KeyState::Delete => batch.delete_cf(cf, key).map_err(other_io_err)?, - KeyState::Insert(ref value) => batch.put_cf(cf, key, value).map_err(other_io_err)?, + KeyState::Delete => { + bytes += key.len(); + batch.delete_cf(cf, key).map_err(other_io_err)? + } + KeyState::Insert(ref value) => { + bytes += key.len() + value.len(); + batch.put_cf(cf, key, value).map_err(other_io_err)? + } }; } } } check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts))?; + self.stats.tally_transactions(1); + self.stats.tally_writes(ops as u64); + self.stats.tally_bytes_written(bytes as u64); for column in self.flushing.write().iter_mut() { column.clear(); @@ -474,6 +490,12 @@ impl Database { Some(ref cfs) => { let mut batch = WriteBatch::default(); let ops = tr.ops; + + self.stats.tally_writes(ops.len() as u64); + self.stats.tally_transactions(1); + + let mut stats_total_bytes = 0; + for op in ops { // remove any buffered operation for this key self.overlay.write()[op.col() as usize].remove(op.key()); @@ -481,10 +503,18 @@ impl Database { let cf = cfs.cf(op.col() as usize); match op { - DBOp::Insert { col: _, key, value } => batch.put_cf(cf, &key, &value).map_err(other_io_err)?, - DBOp::Delete { col: _, key } => batch.delete_cf(cf, &key).map_err(other_io_err)?, + DBOp::Insert { col: _, key, value } => { + stats_total_bytes += key.len() + value.len(); + batch.put_cf(cf, &key, &value).map_err(other_io_err)? 
+ } + DBOp::Delete { col: _, key } => { + // We count deletes as writes. + stats_total_bytes += key.len(); + batch.delete_cf(cf, &key).map_err(other_io_err)? + } }; } + self.stats.tally_bytes_written(stats_total_bytes as u64); check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) } @@ -496,6 +526,7 @@ impl Database { pub fn get(&self, col: u32, key: &[u8]) -> io::Result> { match *self.db.read() { Some(ref cfs) => { + self.stats.tally_reads(1); let overlay = &self.overlay.read()[col as usize]; match overlay.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), @@ -505,11 +536,21 @@ impl Database { match flushing.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), Some(&KeyState::Delete) => Ok(None), - None => cfs - .db - .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) - .map(|r| r.map(|v| v.to_vec())) - .map_err(other_io_err), + None => { + let aquired_val = cfs + .db + .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) + .map(|r| r.map(|v| v.to_vec())) + .map_err(other_io_err); + + match aquired_val { + Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), + Ok(None) => self.stats.tally_bytes_read(key.len() as u64), + _ => {} + }; + + aquired_val + } } } } @@ -704,6 +745,26 @@ impl KeyValueDB for Database { fn restore(&self, new_db: &str) -> io::Result<()> { Database::restore(self, new_db) } + + fn io_stats(&self, kind: kvdb::IoStatsKind) -> kvdb::IoStats { + let taken_stats = match kind { + kvdb::IoStatsKind::Overall => self.stats.overall(), + kvdb::IoStatsKind::SincePrevious => self.stats.since_previous(), + }; + + let mut stats = kvdb::IoStats::empty(); + + stats.reads = taken_stats.raw.reads; + stats.writes = taken_stats.raw.writes; + stats.transactions = taken_stats.raw.transactions; + stats.bytes_written = taken_stats.raw.bytes_written; + stats.bytes_read = taken_stats.raw.bytes_read; + + stats.started = taken_stats.started; + stats.span = 
taken_stats.started.elapsed(); + + stats + } } impl Drop for Database { @@ -916,6 +977,55 @@ mod tests { assert_eq!(db.num_keys(0).unwrap(), 1, "adding a key increases the count"); } + #[test] + fn stats() { + use kvdb::IoStatsKind; + + let tempdir = TempDir::new("").unwrap(); + let config = DatabaseConfig::with_columns(3); + let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + + let key1 = b"kkk"; + let mut batch = db.transaction(); + batch.put(0, key1, key1); + batch.put(1, key1, key1); + batch.put(2, key1, key1); + + for _ in 0..10 { + db.get(0, key1).unwrap(); + } + + db.write(batch).unwrap(); + + let io_stats = db.io_stats(IoStatsKind::SincePrevious); + assert_eq!(io_stats.transactions, 1); + assert_eq!(io_stats.writes, 3); + assert_eq!(io_stats.bytes_written, 18); + assert_eq!(io_stats.reads, 10); + assert_eq!(io_stats.bytes_read, 30); + + let new_io_stats = db.io_stats(IoStatsKind::SincePrevious); + // Since we taken previous statistic period, + // this is expected to be totally empty. + assert_eq!(new_io_stats.transactions, 0); + + // but the overall should be there + let new_io_stats = db.io_stats(IoStatsKind::Overall); + assert_eq!(new_io_stats.bytes_written, 18); + + let mut batch = db.transaction(); + batch.delete(0, key1); + batch.delete(1, key1); + batch.delete(2, key1); + + // transaction is not commited yet + assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 0); + + db.write(batch).unwrap(); + // now it is, and delete is counted as write + assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 3); + } + #[test] fn test_iter_by_prefix() { let tempdir = TempDir::new("").unwrap(); diff --git a/kvdb-rocksdb/src/stats.rs b/kvdb-rocksdb/src/stats.rs new file mode 100644 index 000000000..039dd3a88 --- /dev/null +++ b/kvdb-rocksdb/src/stats.rs @@ -0,0 +1,144 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use parking_lot::RwLock; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::time::Instant; + +pub struct RawDbStats { + pub reads: u64, + pub writes: u64, + pub bytes_written: u64, + pub bytes_read: u64, + pub transactions: u64, +} + +impl RawDbStats { + fn combine(&self, other: &RawDbStats) -> Self { + RawDbStats { + reads: self.reads + other.reads, + writes: self.writes + other.writes, + bytes_written: self.bytes_written + other.bytes_written, + bytes_read: self.bytes_read + other.bytes_written, + transactions: self.transactions + other.transactions, + } + } +} + +struct OverallDbStats { + stats: RawDbStats, + last_taken: Instant, + started: Instant, +} + +impl OverallDbStats { + fn new() -> Self { + OverallDbStats { + stats: RawDbStats { reads: 0, writes: 0, bytes_written: 0, bytes_read: 0, transactions: 0 }, + last_taken: Instant::now(), + started: Instant::now(), + } + } +} + +pub struct RunningDbStats { + reads: AtomicU64, + writes: AtomicU64, + bytes_written: AtomicU64, + bytes_read: AtomicU64, + transactions: AtomicU64, + overall: RwLock, +} + +pub struct TakenDbStats { + pub raw: RawDbStats, + pub started: Instant, +} + +impl RunningDbStats { + pub fn new() -> Self { + Self { + reads: 0.into(), + bytes_read: 0.into(), + writes: 0.into(), + bytes_written: 0.into(), + transactions: 0.into(), + overall: OverallDbStats::new().into(), + } + 
} + + pub fn tally_reads(&self, val: u64) { + self.reads.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_bytes_read(&self, val: u64) { + self.bytes_read.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_writes(&self, val: u64) { + self.writes.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_bytes_written(&self, val: u64) { + self.bytes_written.fetch_add(val, AtomicOrdering::Relaxed); + } + + pub fn tally_transactions(&self, val: u64) { + self.transactions.fetch_add(val, AtomicOrdering::Relaxed); + } + + fn take_current(&self) -> RawDbStats { + RawDbStats { + reads: self.reads.swap(0, AtomicOrdering::Relaxed), + writes: self.writes.swap(0, AtomicOrdering::Relaxed), + bytes_written: self.bytes_written.swap(0, AtomicOrdering::Relaxed), + bytes_read: self.bytes_read.swap(0, AtomicOrdering::Relaxed), + transactions: self.transactions.swap(0, AtomicOrdering::Relaxed), + } + } + + fn peek_current(&self) -> RawDbStats { + RawDbStats { + reads: self.reads.load(AtomicOrdering::Relaxed), + writes: self.writes.load(AtomicOrdering::Relaxed), + bytes_written: self.bytes_written.load(AtomicOrdering::Relaxed), + bytes_read: self.bytes_read.load(AtomicOrdering::Relaxed), + transactions: self.transactions.load(AtomicOrdering::Relaxed), + } + } + + pub fn since_previous(&self) -> TakenDbStats { + let mut overall_lock = self.overall.write(); + + let current = self.take_current(); + + overall_lock.stats = overall_lock.stats.combine(¤t); + + let stats = TakenDbStats { raw: current, started: overall_lock.last_taken }; + + overall_lock.last_taken = Instant::now(); + + stats + } + + pub fn overall(&self) -> TakenDbStats { + let overall_lock = self.overall.read(); + + let current = self.peek_current(); + + TakenDbStats { raw: overall_lock.stats.combine(¤t), started: overall_lock.started } + } +} diff --git a/kvdb/src/io_stats.rs b/kvdb/src/io_stats.rs new file mode 100644 index 000000000..d0de5ce36 --- /dev/null +++ b/kvdb/src/io_stats.rs @@ -0,0 
+1,141 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Generic statistics for key-value databases + +/// Statistic kind to query. +pub enum Kind { + /// Overall statistics since start. + Overall, + /// Statistics since previous query. + SincePrevious, +} + +/// Statistic for the `span` period +#[derive(Debug, Clone)] +pub struct IoStats { + /// Number of transaction. + pub transactions: u64, + /// Number of read operations. + pub reads: u64, + /// Number of reads resulted in a read from cache. + pub cache_reads: u64, + /// Number of write operations. + pub writes: u64, + /// Number of bytes read + pub bytes_read: u64, + /// Number of bytes read from cache + pub cache_read_bytes: u64, + /// Number of bytes write + pub bytes_written: u64, + /// Start of the statistic period. + pub started: std::time::Instant, + /// Total duration of the statistic period. + pub span: std::time::Duration, +} + +impl IoStats { + /// Empty statistic report. 
+ pub fn empty() -> Self { + Self { + transactions: 0, + reads: 0, + cache_reads: 0, + writes: 0, + bytes_read: 0, + cache_read_bytes: 0, + bytes_written: 0, + started: std::time::Instant::now(), + span: std::time::Duration::default(), + } + } + + /// Average batch (transaction) size (writes per transaction) + pub fn avg_batch_size(&self) -> f64 { + if self.writes == 0 { + return 0.0; + } + self.transactions as f64 / self.writes as f64 + } + + /// Read operations per second. + pub fn reads_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0; + } + + self.reads as f64 / self.span.as_secs_f64() + } + + pub fn byte_reads_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0; + } + + self.bytes_read as f64 / self.span.as_secs_f64() + } + + /// Write operations per second. + pub fn writes_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0; + } + + self.writes as f64 / self.span.as_secs_f64() + } + + pub fn byte_writes_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0; + } + + self.bytes_written as f64 / self.span.as_secs_f64() + } + + /// Total number of operations per second. + pub fn ops_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0; + } + + (self.writes as f64 + self.reads as f64) / self.span.as_secs_f64() + } + + /// Transactions per second. 
+ pub fn transactions_per_sec(&self) -> f64 { + if self.span.as_secs_f64() == 0.0 { + return 0.0; + } + + (self.transactions as f64) / self.span.as_secs_f64() + } + + pub fn avg_transaction_size(&self) -> f64 { + if self.transactions == 0 { + return 0.0; + } + + self.bytes_written as f64 / self.transactions as f64 + } + + pub fn cache_hit_ratio(&self) -> f64 { + if self.reads == 0 { + return 0.0; + } + + self.cache_reads as f64 / self.reads as f64 + } +} diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 708871982..2bcb1b39c 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Key-Value store abstraction with `RocksDB` backend. +//! Key-Value store abstraction. use bytes::Bytes; use smallvec::SmallVec; @@ -22,6 +22,8 @@ use std::io; use std::path::Path; use std::sync::Arc; +mod io_stats; + /// Required length of prefixes. pub const PREFIX_LEN: usize = 12; @@ -30,6 +32,8 @@ pub type DBValue = Vec; /// Database keys. pub type DBKey = SmallVec<[u8; 32]>; +pub use io_stats::{IoStats, Kind as IoStatsKind}; + /// Write transaction. Batches a sequence of put/delete operations for efficiency. #[derive(Default, Clone, PartialEq)] pub struct DBTransaction { @@ -143,6 +147,16 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { /// Attempt to replace this database with a new one located at the given path. fn restore(&self, new_db: &str) -> io::Result<()>; + + /// Query statistics. + /// + /// Not all kvdb implementations are able or expected to implement this, so by + /// default, empty statistics is returned. Also, not all kvdb implementation + /// can return every statistic or configured to do so (some statistics gathering + /// may impede the performance and might be off by default). + fn io_stats(&self, _kind: IoStatsKind) -> IoStats { + IoStats::empty() + } } /// Generic key-value database handler. 
This trait contains one function `open`. From ab5d566cd016a519b166f9cd5ddcdc71fc8e351f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 2 Jan 2020 15:48:04 +0300 Subject: [PATCH 058/359] Add a top level function to parity-util-mem (#298) * add function * Update parity-util-mem/src/lib.rs Co-Authored-By: cheme * Update parity-util-mem/src/lib.rs Co-Authored-By: cheme * cargo fmt Co-authored-by: cheme --- parity-util-mem/src/lib.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 23c07e27b..2d45cf9b4 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -73,10 +73,17 @@ pub use malloc_size::{MallocSizeOf, MallocSizeOfOps}; pub use parity_util_mem_derive::*; +/// Heap size of structure. +/// +/// Structure can be anything that implements MallocSizeOf. +pub fn malloc_size(t: &T) -> usize { + MallocSizeOf::size_of(t, &mut allocators::new_malloc_size_ops()) +} + #[cfg(feature = "std")] #[cfg(test)] mod test { - use super::MallocSizeOfExt; + use super::{malloc_size, MallocSizeOf, MallocSizeOfExt}; use std::sync::Arc; #[test] @@ -85,4 +92,12 @@ mod test { let s = val.malloc_size_of(); assert!(s > 0); } + + #[test] + fn test_dyn() { + trait Augmented: MallocSizeOf {} + impl Augmented for Vec {} + let val: Arc = Arc::new(vec![0u8; 1024]); + assert!(malloc_size(&*val) > 1000); + } } From 5371e63020494fbec3e9d9095dfddb802237ee32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 3 Jan 2020 08:00:57 +0100 Subject: [PATCH 059/359] Expose to_hex and from_hex from impl-serde (#302) * Expose to and from hex. * Bump minor version. 
* impl-serde: cargo fmt Co-authored-by: Andronik Ordian --- ethbloom/Cargo.toml | 2 +- ethereum-types/Cargo.toml | 2 +- primitive-types/Cargo.toml | 2 +- primitive-types/impls/serde/Cargo.toml | 2 +- primitive-types/impls/serde/src/serialize.rs | 123 +++++++++++++------ 5 files changed, 89 insertions(+), 42 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 56a092776..952f043c3 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -13,7 +13,7 @@ edition = "2018" tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } [dev-dependencies] diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index eb1a85e20..8153ea823 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -12,7 +12,7 @@ ethbloom = { path = "../ethbloom", version = "0.8", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } primitive-types = { path = "../primitive-types", version = "0.6", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.2", default-features = false, optional = true } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", 
default-features = false } [dev-dependencies] diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index b289a67c1..1ea39518c 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" [dependencies] fixed-hash = { version = "0.5", path = "../fixed-hash", default-features = false } uint = { version = "0.8.1", path = "../uint", default-features = false } -impl-serde = { version = "0.2.1", path = "impls/serde", default-features = false, optional = true } +impl-serde = { version = "0.3.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index dc01bc6ab..a57ada2a2 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.2.3" +version = "0.3.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0/MIT" diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 59d117fd0..d632652f1 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -11,7 +11,32 @@ use std::fmt; static CHARS: &[u8] = b"0123456789abcdef"; -fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str { +/// Serialize given bytes to a 0x-prefixed hex string. +/// +/// If `skip_leading_zero` initial 0s will not be printed out, +/// unless the byte string is empty, in which case `0x0` will be returned. +/// The results are consistent with `serialize_uint` output if the flag is +/// on and `serialize_raw` if the flag is off. 
+pub fn to_hex(bytes: &[u8], skip_leading_zero: bool) -> String { + let bytes = if skip_leading_zero { + let non_zero = bytes.iter().take_while(|b| **b == 0).count(); + let bytes = &bytes[non_zero..]; + if bytes.is_empty() { + return "0x0".into(); + } else { + bytes + } + } else if bytes.is_empty() { + return "0x".into(); + } else { + bytes + }; + + let mut slice = vec![0u8; (bytes.len() + 1) * 2]; + to_hex_raw(&mut slice, bytes, skip_leading_zero).into() +} + +fn to_hex_raw<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str { assert!(v.len() > 1 + bytes.len() * 2); v[0] = b'0'; @@ -36,6 +61,48 @@ fn to_hex<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a str unsafe { std::str::from_utf8_unchecked(&v[0..idx]) } } +/// Decode given hex string into a vector of bytes. +/// +/// Returns an error if the string is not prefixed with `0x` +/// or non-hex characters are present. +pub fn from_hex(v: &str) -> Result, String> { + if !v.starts_with("0x") { + return Err("0x prefix is missing".into()); + } + + let bytes_len = v.len() - 2; + let mut modulus = bytes_len % 2; + let mut bytes = vec![0u8; (bytes_len + 1) / 2]; + let mut buf = 0; + let mut pos = 0; + for (idx, byte) in v.bytes().enumerate().skip(2) { + buf <<= 4; + + match byte { + b'A'..=b'F' => buf |= byte - b'A' + 10, + b'a'..=b'f' => buf |= byte - b'a' + 10, + b'0'..=b'9' => buf |= byte - b'0', + b' ' | b'\r' | b'\n' | b'\t' => { + buf >>= 4; + continue; + } + b => { + let ch = char::from(b); + return Err(format!("invalid hex character: {}, at {}", ch, idx)); + } + } + + modulus += 1; + if modulus == 2 { + modulus = 0; + bytes[pos] = buf; + pos += 1; + } + } + + Ok(bytes) +} + /// Serializes a slice of bytes. 
pub fn serialize_raw(slice: &mut [u8], bytes: &[u8], serializer: S) -> Result where @@ -44,7 +111,7 @@ where if bytes.is_empty() { serializer.serialize_str("0x") } else { - serializer.serialize_str(to_hex(slice, bytes, false)) + serializer.serialize_str(to_hex_raw(slice, bytes, false)) } } @@ -69,7 +136,7 @@ where if bytes.is_empty() { serializer.serialize_str("0x0") } else { - serializer.serialize_str(to_hex(slice, bytes, true)) + serializer.serialize_str(to_hex_raw(slice, bytes, true)) } } @@ -107,41 +174,7 @@ where } fn visit_str(self, v: &str) -> Result { - if !v.starts_with("0x") { - return Err(E::custom("prefix is missing")); - } - - let bytes_len = v.len() - 2; - let mut modulus = bytes_len % 2; - let mut bytes = vec![0u8; (bytes_len + 1) / 2]; - let mut buf = 0; - let mut pos = 0; - for (idx, byte) in v.bytes().enumerate().skip(2) { - buf <<= 4; - - match byte { - b'A'..=b'F' => buf |= byte - b'A' + 10, - b'a'..=b'f' => buf |= byte - b'a' + 10, - b'0'..=b'9' => buf |= byte - b'0', - b' ' | b'\r' | b'\n' | b'\t' => { - buf >>= 4; - continue; - } - b => { - let ch = char::from(b); - return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))); - } - } - - modulus += 1; - if modulus == 2 { - modulus = 0; - bytes[pos] = buf; - pos += 1; - } - } - - Ok(bytes) + from_hex(v).map_err(E::custom) } fn visit_string(self, v: String) -> Result { @@ -229,6 +262,7 @@ where #[cfg(test)] mod tests { + use super::*; extern crate serde_derive; use self::serde_derive::{Deserialize, Serialize}; @@ -278,4 +312,17 @@ mod tests { let deserialized: Bytes = serde_json::from_str(&data).unwrap(); assert!(deserialized.0.is_empty()) } + + #[test] + fn should_encode_to_and_from_hex() { + assert_eq!(to_hex(&[0, 1, 2], true), "0x102"); + assert_eq!(to_hex(&[0, 1, 2], false), "0x000102"); + assert_eq!(to_hex(&[0], true), "0x0"); + assert_eq!(to_hex(&[], true), "0x0"); + assert_eq!(to_hex(&[], false), "0x"); + assert_eq!(to_hex(&[0], false), "0x00"); + 
assert_eq!(from_hex("0x0102"), Ok(vec![1, 2])); + assert_eq!(from_hex("0x102"), Ok(vec![1, 2])); + assert_eq!(from_hex("0xf"), Ok(vec![0xf])); + } } From 072d8e8cf2f450a145501e5d1415fe9f705ea72b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 3 Jan 2020 10:35:10 +0300 Subject: [PATCH 060/359] extract common kvdb tests into a crate (#301) * kvdb-test-utils: extract common KeyValueDB tests into a crate * kvdb-memorydb: use kvdb-test-utils for tests * kvdb-test-utils: un-unwrap-ify * kvdb-rocksdb: use kvdb-test-utils for tests * kvdb-web: use kvdb-test-utils for tests * update year in license headers * Cargo.toml: add newlines * rename kvdb-test-utils to kvdb-shared-tests --- Cargo.toml | 1 + kvdb-memorydb/Cargo.toml | 3 + kvdb-memorydb/src/lib.rs | 99 +++----------- kvdb-rocksdb/Cargo.toml | 4 +- kvdb-rocksdb/src/lib.rs | 228 +++++++------------------------- kvdb-shared-tests/Cargo.toml | 10 ++ kvdb-shared-tests/src/lib.rs | 243 +++++++++++++++++++++++++++++++++++ kvdb-web/Cargo.toml | 3 +- kvdb-web/tests/indexed_db.rs | 51 +++++++- 9 files changed, 369 insertions(+), 273 deletions(-) create mode 100644 kvdb-shared-tests/Cargo.toml create mode 100644 kvdb-shared-tests/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index c2511404c..1ba7acb78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "kvdb", "kvdb-memorydb", "kvdb-rocksdb", + "kvdb-shared-tests", "kvdb-web", "parity-bytes", "parity-crypto", diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 1451a66b8..bf7d7215c 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -11,3 +11,6 @@ edition = "2018" parity-util-mem = { path = "../parity-util-mem", version = "0.3" } parking_lot = "0.9.0" kvdb = { version = "0.2", path = "../kvdb" } + +[dev-dependencies] +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index ea0c85649..1f40d24cc 100644 --- 
a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify @@ -116,104 +116,43 @@ impl KeyValueDB for InMemory { #[cfg(test)] mod tests { - use super::{create, KeyValueDB}; + use super::create; + use kvdb_shared_tests as st; + use std::io; #[test] - fn get_fails_with_non_existing_column() { + fn get_fails_with_non_existing_column() -> io::Result<()> { let db = create(1); - assert!(db.get(1, &[]).is_err()); + st::test_get_fails_with_non_existing_column(&db) } #[test] - fn put_and_get() { + fn put_and_get() -> io::Result<()> { let db = create(1); - - let key1 = b"key1"; - - let mut transaction = db.transaction(); - transaction.put(0, key1, b"horse"); - db.write_buffered(transaction); - assert_eq!(&*db.get(0, key1).unwrap().unwrap(), b"horse"); + st::test_put_and_get(&db) } #[test] - fn delete_and_get() { + fn delete_and_get() -> io::Result<()> { let db = create(1); - - let key1 = b"key1"; - - let mut transaction = db.transaction(); - transaction.put(0, key1, b"horse"); - db.write_buffered(transaction); - assert_eq!(&*db.get(0, key1).unwrap().unwrap(), b"horse"); - - let mut transaction = db.transaction(); - transaction.delete(0, key1); - db.write_buffered(transaction); - assert!(db.get(0, key1).unwrap().is_none()); + st::test_delete_and_get(&db) } #[test] - fn iter() { + fn iter() -> io::Result<()> { let db = create(1); - - let key1 = b"key1"; - let key2 = b"key2"; - - let mut transaction = db.transaction(); - transaction.put(0, key1, key1); - transaction.put(0, key2, key2); - db.write_buffered(transaction); - - let contents: Vec<_> = db.iter(0).into_iter().collect(); - assert_eq!(contents.len(), 2); - assert_eq!(&*contents[0].0, key1); - assert_eq!(&*contents[0].1, key1); - assert_eq!(&*contents[1].0, key2); - assert_eq!(&*contents[1].1, key2); + 
st::test_iter(&db) } #[test] - fn iter_from_prefix() { + fn iter_from_prefix() -> io::Result<()> { let db = create(1); + st::test_iter_from_prefix(&db) + } - let key1 = b"0"; - let key2 = b"a"; - let key3 = b"ab"; - - let mut transaction = db.transaction(); - transaction.put(0, key1, key1); - transaction.put(0, key2, key2); - transaction.put(0, key3, key3); - db.write_buffered(transaction); - - let contents: Vec<_> = db.iter_from_prefix(0, b"").into_iter().collect(); - assert_eq!(contents.len(), 3); - assert_eq!(&*contents[0].0, key1); - assert_eq!(&*contents[0].1, key1); - assert_eq!(&*contents[1].0, key2); - assert_eq!(&*contents[1].1, key2); - assert_eq!(&*contents[2].0, key3); - assert_eq!(&*contents[2].1, key3); - - let contents: Vec<_> = db.iter_from_prefix(0, b"0").into_iter().collect(); - assert_eq!(contents.len(), 1); - assert_eq!(&*contents[0].0, key1); - assert_eq!(&*contents[0].1, key1); - - let contents: Vec<_> = db.iter_from_prefix(0, b"a").into_iter().collect(); - assert_eq!(contents.len(), 2); - assert_eq!(&*contents[0].0, key2); - assert_eq!(&*contents[0].1, key2); - assert_eq!(&*contents[1].0, key3); - assert_eq!(&*contents[1].1, key3); - - let contents: Vec<_> = db.iter_from_prefix(0, b"ab").into_iter().collect(); - assert_eq!(contents.len(), 1); - assert_eq!(&*contents[0].0, key3); - assert_eq!(&*contents[0].1, key3); - - let contents: Vec<_> = db.iter_from_prefix(0, b"abc").into_iter().collect(); - assert_eq!(contents.len(), 0); + #[test] + fn complex() -> io::Result<()> { + let db = create(1); + st::test_complex(&db) } } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 695c08def..bc71a0b93 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -3,7 +3,7 @@ name = "kvdb-rocksdb" version = "0.3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" -description = "kvdb implementation backed by rocksDB" +description = "kvdb implementation backed by RocksDB" license = 
"GPL-3.0" edition = "2018" @@ -27,6 +27,6 @@ parity-util-mem = { path = "../parity-util-mem", version = "0.3" } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" -ethereum-types = { version = "0.8.0", path = "../ethereum-types" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } rand = "0.7.2" tempdir = "0.3.7" diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index cb09432dc..1dd64bbcc 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify @@ -527,7 +527,9 @@ impl Database { match *self.db.read() { Some(ref cfs) => { self.stats.tally_reads(1); - let overlay = &self.overlay.read()[col as usize]; + let guard = self.overlay.read(); + let overlay = + guard.get(col as usize).ok_or_else(|| other_io_err("kvdb column index is out of bounds"))?; match overlay.get(key) { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), Some(&KeyState::Delete) => Ok(None), @@ -777,73 +779,56 @@ impl Drop for Database { #[cfg(test)] mod tests { use super::*; - use ethereum_types::H256; - use std::io::Read; - use std::str::FromStr; + use kvdb_shared_tests as st; + use std::io::{self, Read}; use tempdir::TempDir; - fn test_db(config: &DatabaseConfig) { - let tempdir = TempDir::new("").unwrap(); - let db = Database::open(config, tempdir.path().to_str().unwrap()).unwrap(); - - let key1 = H256::from_str("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key3 = H256::from_str("04c00000000b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key4 = H256::from_str("04c01111110b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let key5 = 
H256::from_str("04c02222220b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - - let mut batch = db.transaction(); - batch.put(0, key1.as_bytes(), b"cat"); - batch.put(0, key2.as_bytes(), b"dog"); - batch.put(0, key3.as_bytes(), b"caterpillar"); - batch.put(0, key4.as_bytes(), b"beef"); - batch.put(0, key5.as_bytes(), b"fish"); - db.write(batch).unwrap(); - - assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"cat"); - - let contents: Vec<_> = db.iter(0).into_iter().collect(); - assert_eq!(contents.len(), 5); - assert_eq!(&*contents[0].0, key1.as_bytes()); - assert_eq!(&*contents[0].1, b"cat"); - assert_eq!(&*contents[1].0, key2.as_bytes()); - assert_eq!(&*contents[1].1, b"dog"); - - let mut prefix_iter = db.iter_from_prefix(0, &[0x04, 0xc0]); - assert_eq!(*prefix_iter.next().unwrap().1, b"caterpillar"[..]); - assert_eq!(*prefix_iter.next().unwrap().1, b"beef"[..]); - assert_eq!(*prefix_iter.next().unwrap().1, b"fish"[..]); + fn create(columns: u32) -> io::Result { + let tempdir = TempDir::new("")?; + let config = DatabaseConfig::with_columns(columns); + Database::open(&config, tempdir.path().to_str().expect("tempdir path is valid unicode")) + } - let mut batch = db.transaction(); - batch.delete(0, key1.as_bytes()); - db.write(batch).unwrap(); + #[test] + fn get_fails_with_non_existing_column() -> io::Result<()> { + let db = create(1)?; + st::test_get_fails_with_non_existing_column(&db) + } - assert!(db.get(0, key1.as_bytes()).unwrap().is_none()); + #[test] + fn put_and_get() -> io::Result<()> { + let db = create(1)?; + st::test_put_and_get(&db) + } - let mut batch = db.transaction(); - batch.put(0, key1.as_bytes(), b"cat"); - db.write(batch).unwrap(); + #[test] + fn delete_and_get() -> io::Result<()> { + let db = create(1)?; + st::test_delete_and_get(&db) + } - let mut transaction = db.transaction(); - transaction.put(0, key3.as_bytes(), b"elephant"); - transaction.delete(0, key1.as_bytes()); - db.write(transaction).unwrap(); - 
assert!(db.get(0, key1.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(0, key3.as_bytes()).unwrap().unwrap(), b"elephant"); + #[test] + fn iter() -> io::Result<()> { + let db = create(1)?; + st::test_iter(&db) + } - assert_eq!(&*db.get_by_prefix(0, key3.as_bytes()).unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(0, key2.as_bytes()).unwrap(), b"dog"); + #[test] + fn iter_from_prefix() -> io::Result<()> { + let db = create(1)?; + st::test_iter_from_prefix(&db) + } - let mut transaction = db.transaction(); - transaction.put(0, key1.as_bytes(), b"horse"); - transaction.delete(0, key3.as_bytes()); - db.write_buffered(transaction); - assert!(db.get(0, key3.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"horse"); + #[test] + fn complex() -> io::Result<()> { + let db = create(1)?; + st::test_complex(&db) + } - db.flush().unwrap(); - assert!(db.get(0, key3.as_bytes()).unwrap().is_none()); - assert_eq!(&*db.get(0, key1.as_bytes()).unwrap().unwrap(), b"horse"); + #[test] + fn stats() -> io::Result<()> { + let db = create(3)?; + st::test_io_stats(&db) } #[test] @@ -876,14 +861,6 @@ mod tests { } } - #[test] - fn kvdb() { - let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::default(); - let _ = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - test_db(&config); - } - #[test] #[cfg(target_os = "linux")] fn df_to_rotational() { @@ -977,121 +954,6 @@ mod tests { assert_eq!(db.num_keys(0).unwrap(), 1, "adding a key increases the count"); } - #[test] - fn stats() { - use kvdb::IoStatsKind; - - let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::with_columns(3); - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - - let key1 = b"kkk"; - let mut batch = db.transaction(); - batch.put(0, key1, key1); - batch.put(1, key1, key1); - batch.put(2, key1, key1); - - for _ in 0..10 { - db.get(0, key1).unwrap(); - } - - db.write(batch).unwrap(); 
- - let io_stats = db.io_stats(IoStatsKind::SincePrevious); - assert_eq!(io_stats.transactions, 1); - assert_eq!(io_stats.writes, 3); - assert_eq!(io_stats.bytes_written, 18); - assert_eq!(io_stats.reads, 10); - assert_eq!(io_stats.bytes_read, 30); - - let new_io_stats = db.io_stats(IoStatsKind::SincePrevious); - // Since we taken previous statistic period, - // this is expected to be totally empty. - assert_eq!(new_io_stats.transactions, 0); - - // but the overall should be there - let new_io_stats = db.io_stats(IoStatsKind::Overall); - assert_eq!(new_io_stats.bytes_written, 18); - - let mut batch = db.transaction(); - batch.delete(0, key1); - batch.delete(1, key1); - batch.delete(2, key1); - - // transaction is not commited yet - assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 0); - - db.write(batch).unwrap(); - // now it is, and delete is counted as write - assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 3); - } - - #[test] - fn test_iter_by_prefix() { - let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::with_columns(1); - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - - let key1 = b"0"; - let key2 = b"ab"; - let key3 = b"abc"; - let key4 = b"abcd"; - - let mut batch = db.transaction(); - batch.put(0, key1, key1); - batch.put(0, key2, key2); - batch.put(0, key3, key3); - batch.put(0, key4, key4); - db.write(batch).unwrap(); - - // empty prefix - let contents: Vec<_> = db.iter_from_prefix(0, b"").into_iter().collect(); - assert_eq!(contents.len(), 4); - assert_eq!(&*contents[0].0, key1); - assert_eq!(&*contents[1].0, key2); - assert_eq!(&*contents[2].0, key3); - assert_eq!(&*contents[3].0, key4); - - // prefix a - let contents: Vec<_> = db.iter_from_prefix(0, b"a").into_iter().collect(); - assert_eq!(contents.len(), 3); - assert_eq!(&*contents[0].0, key2); - assert_eq!(&*contents[1].0, key3); - assert_eq!(&*contents[2].0, key4); - - // prefix abc - let contents: Vec<_> = 
db.iter_from_prefix(0, b"abc").into_iter().collect(); - assert_eq!(contents.len(), 2); - assert_eq!(&*contents[0].0, key3); - assert_eq!(&*contents[1].0, key4); - - // prefix abcde - let contents: Vec<_> = db.iter_from_prefix(0, b"abcde").into_iter().collect(); - assert_eq!(contents.len(), 0); - - // prefix 0 - let contents: Vec<_> = db.iter_from_prefix(0, b"0").into_iter().collect(); - assert_eq!(contents.len(), 1); - assert_eq!(&*contents[0].0, key1); - } - - #[test] - fn write_clears_buffered_ops() { - let tempdir = TempDir::new("").unwrap(); - let config = DatabaseConfig::with_columns(1); - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - - let mut batch = db.transaction(); - batch.put(0, b"foo", b"bar"); - db.write_buffered(batch); - - let mut batch = db.transaction(); - batch.put(0, b"foo", b"baz"); - db.write(batch).unwrap(); - - assert_eq!(db.get(0, b"foo").unwrap().unwrap(), b"baz"); - } - #[test] fn default_memory_budget() { let c = DatabaseConfig::default(); diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml new file mode 100644 index 000000000..4679a4900 --- /dev/null +++ b/kvdb-shared-tests/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "kvdb-shared-tests" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +description = "Shared tests for kvdb functionality, to be executed against actual implementations" +license = "GPL-3.0" + +[dependencies] +kvdb = { path = "../kvdb", version = "0.2" } diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs new file mode 100644 index 000000000..28613c4f3 --- /dev/null +++ b/kvdb-shared-tests/src/lib.rs @@ -0,0 +1,243 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Shared tests for kvdb functionality, to be executed against actual implementations. + +use kvdb::{IoStatsKind, KeyValueDB}; +use std::io; + +/// A test for `KeyValueDB::get`. +pub fn test_put_and_get(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"key1"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write_buffered(transaction); + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + Ok(()) +} + +/// A test for `KeyValueDB::get`. +pub fn test_delete_and_get(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"key1"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write_buffered(transaction); + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + + let mut transaction = db.transaction(); + transaction.delete(0, key1); + db.write_buffered(transaction); + assert!(db.get(0, key1)?.is_none()); + Ok(()) +} + +/// A test for `KeyValueDB::get`. +/// Assumes the `db` has only 1 column. +pub fn test_get_fails_with_non_existing_column(db: &dyn KeyValueDB) -> io::Result<()> { + assert!(db.get(1, &[]).is_err()); + Ok(()) +} + +/// A test for `KeyValueDB::write`. 
+pub fn test_write_clears_buffered_ops(db: &dyn KeyValueDB) -> io::Result<()> { + let mut batch = db.transaction(); + batch.put(0, b"foo", b"bar"); + db.write_buffered(batch); + + assert_eq!(db.get(0, b"foo")?.unwrap(), b"bar"); + + let mut batch = db.transaction(); + batch.put(0, b"foo", b"baz"); + db.write(batch)?; + + assert_eq!(db.get(0, b"foo")?.unwrap(), b"baz"); + Ok(()) +} + +/// A test for `KeyValueDB::iter`. +pub fn test_iter(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"key1"; + let key2 = b"key2"; + + let mut transaction = db.transaction(); + transaction.put(0, key1, key1); + transaction.put(0, key2, key2); + db.write_buffered(transaction); + + let contents: Vec<_> = db.iter(0).into_iter().collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[0].1, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[1].1, key2); + Ok(()) +} + +/// A test for `KeyValueDB::iter_from_prefix`. +pub fn test_iter_from_prefix(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"0"; + let key2 = b"ab"; + let key3 = b"abc"; + let key4 = b"abcd"; + + let mut batch = db.transaction(); + batch.put(0, key1, key1); + batch.put(0, key2, key2); + batch.put(0, key3, key3); + batch.put(0, key4, key4); + db.write(batch)?; + + // empty prefix + let contents: Vec<_> = db.iter_from_prefix(0, b"").into_iter().collect(); + assert_eq!(contents.len(), 4); + assert_eq!(&*contents[0].0, key1); + assert_eq!(&*contents[1].0, key2); + assert_eq!(&*contents[2].0, key3); + assert_eq!(&*contents[3].0, key4); + + // prefix a + let contents: Vec<_> = db.iter_from_prefix(0, b"a").into_iter().collect(); + assert_eq!(contents.len(), 3); + assert_eq!(&*contents[0].0, key2); + assert_eq!(&*contents[1].0, key3); + assert_eq!(&*contents[2].0, key4); + + // prefix abc + let contents: Vec<_> = db.iter_from_prefix(0, b"abc").into_iter().collect(); + assert_eq!(contents.len(), 2); + assert_eq!(&*contents[0].0, key3); + 
assert_eq!(&*contents[1].0, key4); + + // prefix abcde + let contents: Vec<_> = db.iter_from_prefix(0, b"abcde").into_iter().collect(); + assert_eq!(contents.len(), 0); + + // prefix 0 + let contents: Vec<_> = db.iter_from_prefix(0, b"0").into_iter().collect(); + assert_eq!(contents.len(), 1); + assert_eq!(&*contents[0].0, key1); + Ok(()) +} + +/// A test for `KeyValueDB::io_stats`. +/// Assumes that the `db` has at least 3 columns. +pub fn test_io_stats(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"kkk"; + let mut batch = db.transaction(); + batch.put(0, key1, key1); + batch.put(1, key1, key1); + batch.put(2, key1, key1); + + for _ in 0..10 { + db.get(0, key1)?; + } + + db.write(batch)?; + + let io_stats = db.io_stats(IoStatsKind::SincePrevious); + assert_eq!(io_stats.transactions, 1); + assert_eq!(io_stats.writes, 3); + assert_eq!(io_stats.bytes_written, 18); + assert_eq!(io_stats.reads, 10); + assert_eq!(io_stats.bytes_read, 30); + + let new_io_stats = db.io_stats(IoStatsKind::SincePrevious); + // Since we taken previous statistic period, + // this is expected to be totally empty. + assert_eq!(new_io_stats.transactions, 0); + + // but the overall should be there + let new_io_stats = db.io_stats(IoStatsKind::Overall); + assert_eq!(new_io_stats.bytes_written, 18); + + let mut batch = db.transaction(); + batch.delete(0, key1); + batch.delete(1, key1); + batch.delete(2, key1); + + // transaction is not commited yet + assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 0); + + db.write(batch)?; + // now it is, and delete is counted as write + assert_eq!(db.io_stats(IoStatsKind::SincePrevious).writes, 3); + Ok(()) +} + +/// A complex test. 
+pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { + let key1 = b"02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key2 = b"03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key3 = b"04c00000000b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key4 = b"04c01111110b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + let key5 = b"04c02222220b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; + + let mut batch = db.transaction(); + batch.put(0, key1, b"cat"); + batch.put(0, key2, b"dog"); + batch.put(0, key3, b"caterpillar"); + batch.put(0, key4, b"beef"); + batch.put(0, key5, b"fish"); + db.write(batch)?; + + assert_eq!(&*db.get(0, key1)?.unwrap(), b"cat"); + + let contents: Vec<_> = db.iter(0).into_iter().collect(); + assert_eq!(contents.len(), 5); + assert_eq!(contents[0].0.to_vec(), key1.to_vec()); + assert_eq!(&*contents[0].1, b"cat"); + assert_eq!(contents[1].0.to_vec(), key2.to_vec()); + assert_eq!(&*contents[1].1, b"dog"); + + let mut prefix_iter = db.iter_from_prefix(0, b"04c0"); + assert_eq!(*prefix_iter.next().unwrap().1, b"caterpillar"[..]); + assert_eq!(*prefix_iter.next().unwrap().1, b"beef"[..]); + assert_eq!(*prefix_iter.next().unwrap().1, b"fish"[..]); + + let mut batch = db.transaction(); + batch.delete(0, key1); + db.write(batch)?; + + assert!(db.get(0, key1)?.is_none()); + + let mut batch = db.transaction(); + batch.put(0, key1, b"cat"); + db.write(batch)?; + + let mut transaction = db.transaction(); + transaction.put(0, key3, b"elephant"); + transaction.delete(0, key1); + db.write(transaction)?; + assert!(db.get(0, key1)?.is_none()); + assert_eq!(&*db.get(0, key3)?.unwrap(), b"elephant"); + + assert_eq!(&*db.get_by_prefix(0, key3).unwrap(), b"elephant"); + assert_eq!(&*db.get_by_prefix(0, key2).unwrap(), b"dog"); + + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + transaction.delete(0, key3); + db.write_buffered(transaction); + 
assert!(db.get(0, key3)?.is_none()); + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + + db.flush()?; + assert!(db.get(0, key3)?.is_none()); + assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); + Ok(()) +} diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index fb3af40ea..5307cd70e 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -38,6 +38,7 @@ features = [ ] [dev-dependencies] -wasm-bindgen-test = "0.3.4" console_log = "0.1.2" +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } +wasm-bindgen-test = "0.3.4" wasm-bindgen-futures = "0.4.4" diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index 9dc0556d4..fe5d8f6a3 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. +// Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify @@ -18,21 +18,58 @@ use futures::future::TryFutureExt as _; +use kvdb_shared_tests as st; use kvdb_web::{Database, KeyValueDB as _}; use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); +async fn open_db(col: u32, name: &str) -> Database { + Database::open(name.into(), col).unwrap_or_else(|err| panic!("{}", err)).await +} + +#[wasm_bindgen_test] +async fn get_fails_with_non_existing_column() { + let db = open_db(1, "get_fails_with_non_existing_column").await; + st::test_get_fails_with_non_existing_column(&db).unwrap() +} + +#[wasm_bindgen_test] +async fn put_and_get() { + let db = open_db(1, "put_and_get").await; + st::test_put_and_get(&db).unwrap() +} + +#[wasm_bindgen_test] +async fn delete_and_get() { + let db = open_db(1, "delete_and_get").await; + st::test_delete_and_get(&db).unwrap() +} + +#[wasm_bindgen_test] +async fn iter() { + let db = open_db(1, "iter").await; + st::test_iter(&db).unwrap() +} + +#[wasm_bindgen_test] +async fn iter_from_prefix() { + let db = open_db(1, 
"iter_from_prefix").await; + st::test_iter_from_prefix(&db).unwrap() +} + +#[wasm_bindgen_test] +async fn complex() { + let db = open_db(1, "complex").await; + st::test_complex(&db).unwrap() +} + #[wasm_bindgen_test] async fn reopen_the_database_with_more_columns() { let _ = console_log::init_with_level(log::Level::Trace); - async fn open_db(col: u32) -> Database { - Database::open("MyAsyncTest".into(), col).unwrap_or_else(|err| panic!("{}", err)).await - } - - let db = open_db(1).await; + let db = open_db(1, "reopen_the_database_with_more_columns").await; // Write a value into the database let mut batch = db.transaction(); @@ -48,7 +85,7 @@ async fn reopen_the_database_with_more_columns() { drop(db); // Reopen it again with 3 columns - let db = open_db(3).await; + let db = open_db(3, "reopen_the_database_with_more_columns").await; // The value should still be present assert_eq!(db.get(0, b"hello").unwrap().unwrap(), b"world"); From 4a4c99019b7670c60d6f77dfd7a3c1acb326a653 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 3 Jan 2020 13:31:18 +0300 Subject: [PATCH 061/359] kvdb: remove KeyValueDBHandler (#304) --- kvdb/src/lib.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 2bcb1b39c..afe95c861 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -158,10 +158,3 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { IoStats::empty() } } - -/// Generic key-value database handler. This trait contains one function `open`. -/// When called, it opens database with a predefined config. -pub trait KeyValueDBHandler: Send + Sync { - /// Open the predefined key-value database. - fn open(&self, path: &Path) -> io::Result>; -} From a7dcd00a2c842ce729f24f7a79576f08de324a45 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 3 Jan 2020 14:10:42 +0300 Subject: [PATCH 062/359] Fix typo. 
(#303) --- kvdb-rocksdb/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 1dd64bbcc..d13603da9 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -539,19 +539,19 @@ impl Database { Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), Some(&KeyState::Delete) => Ok(None), None => { - let aquired_val = cfs + let acquired_val = cfs .db .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) .map(|r| r.map(|v| v.to_vec())) .map_err(other_io_err); - match aquired_val { + match acquired_val { Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), Ok(None) => self.stats.tally_bytes_read(key.len() as u64), _ => {} }; - aquired_val + acquired_val } } } From f02f892cc5d98b1dab45112e29f4583843a37f7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 3 Jan 2020 15:47:00 +0100 Subject: [PATCH 063/359] Use custom error type for `from_hex` (#305) * Use custom error type, remove dups. 
* cargo fmt * parity-util-mem: fix authors in Cargo.toml Co-authored-by: Andronik Ordian --- kvdb/src/lib.rs | 2 - parity-util-mem/derive/Cargo.toml | 2 +- primitive-types/impls/serde/src/serialize.rs | 87 +++++++++++--------- 3 files changed, 49 insertions(+), 42 deletions(-) diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index afe95c861..afda4af87 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -19,8 +19,6 @@ use bytes::Bytes; use smallvec::SmallVec; use std::io; -use std::path::Path; -use std::sync::Arc; mod io_stats; diff --git a/parity-util-mem/derive/Cargo.toml b/parity-util-mem/derive/Cargo.toml index f37d38013..cc208049d 100644 --- a/parity-util-mem/derive/Cargo.toml +++ b/parity-util-mem/derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "parity-util-mem-derive" version = "0.1.0" -authors = ["Parity Technologies"] +authors = ["Parity Technologies "] license = "MIT" description = "Crate for memory reporting" repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index d632652f1..01e85c036 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -61,21 +61,57 @@ fn to_hex_raw<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a unsafe { std::str::from_utf8_unchecked(&v[0..idx]) } } +/// Decoding bytes from hex string error. +#[derive(Debug, PartialEq, Eq)] +pub enum FromHexError { + /// The `0x` prefix is missing. + MissingPrefix, + /// Invalid (non-hex) character encountered. + InvalidHex { + /// The unexpected character. + character: char, + /// Index of that occurrence. 
+ index: usize, + }, +} + +impl std::error::Error for FromHexError {} + +impl fmt::Display for FromHexError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + Self::MissingPrefix => write!(fmt, "0x prefix is missing"), + Self::InvalidHex { character, index } => write!(fmt, "invalid hex character: {}, at {}", character, index), + } + } +} + /// Decode given hex string into a vector of bytes. /// /// Returns an error if the string is not prefixed with `0x` /// or non-hex characters are present. -pub fn from_hex(v: &str) -> Result, String> { +pub fn from_hex(v: &str) -> Result, FromHexError> { if !v.starts_with("0x") { - return Err("0x prefix is missing".into()); + return Err(FromHexError::MissingPrefix); } + let mut bytes = vec![0u8; (v.len() - 1) / 2]; + from_hex_raw(v, &mut bytes)?; + Ok(bytes) +} + +/// Decode given 0x-prefixed hex string into provided slice. +/// Used internally by `from_hex` and `deserialize_check_len`. +/// +/// The method will panic if: +/// 1. `v` is shorter than 2 characters (you need to check 0x prefix outside). +/// 2. `bytes` have incorrect length (make sure to allocate enough beforehand). +fn from_hex_raw<'a>(v: &str, bytes: &mut [u8]) -> Result { let bytes_len = v.len() - 2; let mut modulus = bytes_len % 2; - let mut bytes = vec![0u8; (bytes_len + 1) / 2]; let mut buf = 0; let mut pos = 0; - for (idx, byte) in v.bytes().enumerate().skip(2) { + for (index, byte) in v.bytes().enumerate().skip(2) { buf <<= 4; match byte { @@ -87,8 +123,8 @@ pub fn from_hex(v: &str) -> Result, String> { continue; } b => { - let ch = char::from(b); - return Err(format!("invalid hex character: {}, at {}", ch, idx)); + let character = char::from(b); + return Err(FromHexError::InvalidHex { character, index }); } } @@ -100,7 +136,7 @@ pub fn from_hex(v: &str) -> Result, String> { } } - Ok(bytes) + Ok(pos) } /// Serializes a slice of bytes. 
@@ -204,12 +240,13 @@ where fn visit_str(self, v: &str) -> Result { if !v.starts_with("0x") { - return Err(E::custom("prefix is missing")); + return Err(E::custom(FromHexError::MissingPrefix)); } + let len = v.len(); let is_len_valid = match self.len { - ExpectedLen::Exact(ref slice) => v.len() == 2 * slice.len() + 2, - ExpectedLen::Between(min, ref slice) => v.len() <= 2 * slice.len() + 2 && v.len() > 2 * min + 2, + ExpectedLen::Exact(ref slice) => len == 2 * slice.len() + 2, + ExpectedLen::Between(min, ref slice) => len <= 2 * slice.len() + 2 && len > 2 * min + 2, }; if !is_len_valid { @@ -221,35 +258,7 @@ where ExpectedLen::Between(_, slice) => slice, }; - let mut modulus = v.len() % 2; - let mut buf = 0; - let mut pos = 0; - for (idx, byte) in v.bytes().enumerate().skip(2) { - buf <<= 4; - - match byte { - b'A'..=b'F' => buf |= byte - b'A' + 10, - b'a'..=b'f' => buf |= byte - b'a' + 10, - b'0'..=b'9' => buf |= byte - b'0', - b' ' | b'\r' | b'\n' | b'\t' => { - buf >>= 4; - continue; - } - b => { - let ch = char::from(b); - return Err(E::custom(&format!("invalid hex character: {}, at {}", ch, idx))); - } - } - - modulus += 1; - if modulus == 2 { - modulus = 0; - bytes[pos] = buf; - pos += 1; - } - } - - Ok(pos) + from_hex_raw(v, bytes).map_err(E::custom) } fn visit_string(self, v: String) -> Result { From 80370e78a761b5e80e56d315a2cfa4f5dd58370a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 3 Jan 2020 19:25:41 +0300 Subject: [PATCH 064/359] Draft version updates and changelog (#299) * draft changelog * update changelog * Update kvdb-rocksdb/CHANGELOG.md Co-Authored-By: David * Update kvdb-memorydb/CHANGELOG.md Co-Authored-By: Andronik Ordian * Update kvdb/CHANGELOG.md Co-Authored-By: Andronik Ordian Co-authored-by: David Co-authored-by: Andronik Ordian --- kvdb-memorydb/CHANGELOG.md | 5 ++++- kvdb-memorydb/Cargo.toml | 6 +++--- kvdb-rocksdb/CHANGELOG.md | 10 +++++++--- kvdb-rocksdb/Cargo.toml | 6 +++--- kvdb-shared-tests/Cargo.toml | 2 +- 
kvdb-web/Cargo.toml | 8 ++++---- kvdb/CHANGELOG.md | 6 +++++- kvdb/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 8 +++++++- parity-util-mem/Cargo.toml | 2 +- primitive-types/CHANGELOG.md | 5 ++++- primitive-types/Cargo.toml | 2 +- 12 files changed, 42 insertions(+), 22 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index c594b4d98..a76dd644f 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -1,11 +1,14 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +## [0.3.0] - 2019-01-03 +- InMemory key-value database now can report memory used (via `MallocSizeOf`). [#292](https://github.com/paritytech/parity-common/pull/292) + ## [0.2.0] - 2019-12-19 ### Fixed - `iter_from_prefix` behaviour synced with the `kvdb-rocksdb` diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index bf7d7215c..74c81818c 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.2.0" +version = "0.3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,9 +8,9 @@ license = "GPL-3.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.3" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4" } parking_lot = "0.9.0" -kvdb = { version = "0.2", path = "../kvdb" } +kvdb = { version = "0.3", path = "../kvdb" } [dev-dependencies] kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 550ff450f..f2754de3b 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -1,11 +1,15 @@ # Changelog -The format is based on [Keep a Changelog]. 
+The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +## [0.4.0] - 2019-01-03 +- Add I/O statistics for RocksDB. [#294](https://github.com/paritytech/parity-common/pull/294) +- Support querying memory footprint via `MallocSizeOf` trait. [#292](https://github.com/paritytech/parity-common/pull/292) + ## [0.3.0] - 2019-12-19 - Use `get_pinned` API to save one allocation for each call to `get()` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) - Rename `drop_column` to `remove_last_column` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) @@ -16,10 +20,10 @@ The format is based on [Keep a Changelog]. - Column index `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. - Database must be opened with at least one column and existing DBs has to be opened with a number of columns increased by 1 to avoid having to migrate the data, e.g. before: `Some(9)`, after: `10`. - `DatabaseConfig::default()` defaults to 1 column - - `Database::with_columns` still accepts `u32`, but panics if `0` is provided + - `Database::with_columns` still accepts `u32`, but panics if `0` is provided - `Database::open` panics if configuration with 0 columns is provided - Add `num_keys(col)` to get an estimate of the number of keys in a column (See [PR #285](https://github.com/paritytech/parity-common/pull/285)). -- Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) +- Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. 
(See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) ## [0.2.0] - 2019-11-28 - Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb) (see [PR #257](https://github.com/paritytech/parity-common/pull/257) for details) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index bc71a0b93..778bc0736 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.3.0" +version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -15,14 +15,14 @@ harness = false smallvec = "1.0.0" fs-swap = "0.2.4" interleaved-ordered = "0.1.1" -kvdb = { path = "../kvdb", version = "0.2" } +kvdb = { path = "../kvdb", version = "0.3" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.9.0" regex = "1.3.1" rocksdb = { version = "0.13", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.3" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4" } [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 4679a4900..1b2158c3b 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -7,4 +7,4 @@ description = "Shared tests for kvdb functionality, to be executed against actua license = "GPL-3.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.2" } +kvdb = { path = "../kvdb", version = "0.3" } diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 5307cd70e..985e453c2 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.2.0" +version = "0.3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" 
description = "A key-value database for use in browsers" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.54" js-sys = "0.3.31" -kvdb = { version = "0.2", path = "../kvdb" } -kvdb-memorydb = { version = "0.2", path = "../kvdb-memorydb" } +kvdb = { version = "0.3", path = "../kvdb" } +kvdb-memorydb = { version = "0.3", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.3" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4" } [dependencies.web-sys] version = "0.3.31" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 41a94676a..f6b634d76 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -1,11 +1,15 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +## [0.3.0] - 2020-01-03 +- I/O statistics API. [#294](https://github.com/paritytech/parity-common/pull/294) +- Removed `KeyValueDBHandler` trait. 
[#304](https://github.com/paritytech/parity-common/pull/304) + ## [0.2.0] - 2019-12-19 ### Changed - Default column support removed from the API diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 56f2a7a5e..46aad54c6 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.2.0" +version = "0.3.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -10,4 +10,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } -parity-util-mem = { path = "../parity-util-mem", version = "0.3" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4" } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index e7d58fa35..4330e0304 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -1,11 +1,17 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +## [0.4.0] - 2020-01-01 +- Added implementation of `MallocSizeOf` for non-std `hashbrown::HashMap` and `lru::LRUMap`. [#293](https://github.com/paritytech/parity-common/pull/293) +- Introduced our own version of `#[derive(MallocSizeOf)]` [#291](https://github.com/paritytech/parity-common/pull/291) +- Added implementation of `MallocSizeOf` for `parking_lot` locking primitives. [#290](https://github.com/paritytech/parity-common/pull/290) +- Added default implementation of `MallocSizeOf` for tuples up to 12. [#300](https://github.com/paritytech/parity-common/pull/300) + ## [0.3.0] - 2019-12-19 - Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). 
(See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index ff69fc3aa..b9dc68913 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.3.0" +version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index ed6699fde..12b958c30 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -1,11 +1,14 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +## [0.6.2] - 2019-01-03 +- Expose to_hex and from_hex from impl-serde. [#302](https://github.com/paritytech/parity-common/pull/302) + ## [0.6.1] - 2019-10-24 ### Dependencies - Updated dependencies (https://github.com/paritytech/parity-common/pull/239) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 1ea39518c..31ef6235c 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.6.1" +version = "0.6.2" authors = ["Parity Technologies "] license = "Apache-2.0/MIT" homepage = "https://github.com/paritytech/parity-common" From 477e6d234f423b15f81e3f803031a4397134770a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 6 Jan 2020 12:08:15 +0300 Subject: [PATCH 065/359] Use proper memory queries to rocksdb (#308) * use proper queries to rocksdb * fix test --- kvdb-rocksdb/src/lib.rs | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index d13603da9..d478a0c6d 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ 
-232,22 +232,18 @@ struct DBAndColumns { column_names: Vec, } -fn static_property_or_warn(db: &DB, prop: &str) -> usize { - match db.property_int_value(prop) { - Ok(Some(v)) => v as usize, - _ => { - warn!("Cannot read expected static property of RocksDb database: {}", prop); - 0 - } - } -} - impl MallocSizeOf for DBAndColumns { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - self.column_names.size_of(ops) - + static_property_or_warn(&self.db, "rocksdb.estimate-table-readers-mem") - + static_property_or_warn(&self.db, "rocksdb.cur-size-all-mem-tables") - + static_property_or_warn(&self.db, "rocksdb.block-cache-usage") + let mut total = self.column_names.size_of(ops) + // we have at least one column always, so we can call property on it + + self.static_property_or_warn(0, "rocksdb.block-cache-usage"); + + for v in 0..self.column_names.len() { + total += self.static_property_or_warn(v, "rocksdb.estimate-table-readers-mem"); + total += self.static_property_or_warn(v, "rocksdb.cur-size-all-mem-tables"); + } + + total } } @@ -255,6 +251,16 @@ impl DBAndColumns { fn cf(&self, i: usize) -> &ColumnFamily { self.db.cf_handle(&self.column_names[i]).expect("the specified column name is correct; qed") } + + fn static_property_or_warn(&self, col: usize, prop: &str) -> usize { + match self.db.property_int_value_cf(self.cf(col), prop) { + Ok(Some(v)) => v as usize, + _ => { + warn!("Cannot read expected static property of RocksDb database: {}", prop); + 0 + } + } + } } /// Key-Value database. 
@@ -856,7 +862,7 @@ mod tests { { let db = db.db.read(); db.as_ref().map(|db| { - assert!(super::static_property_or_warn(&db.db, "rocksdb.cur-size-all-mem-tables") > 512); + assert!(db.static_property_or_warn(0, "rocksdb.cur-size-all-mem-tables") > 512); }); } } From 102d3c8ee618916d7d4122554cfc96867a156804 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 6 Jan 2020 13:04:29 +0300 Subject: [PATCH 066/359] Update features and feature dependencies (#307) * Don't use more then needed * remove std * also move smallvec and fixed arrays from impl module * rename mod * move teests * run tests on std only --- kvdb-memorydb/Cargo.toml | 4 +- kvdb-rocksdb/Cargo.toml | 4 +- kvdb-web/Cargo.toml | 4 +- kvdb/Cargo.toml | 4 +- parity-util-mem/Cargo.toml | 4 +- parity-util-mem/src/ethereum_impls.rs | 22 +++++ parity-util-mem/src/impls.rs | 116 -------------------------- parity-util-mem/src/lib.rs | 2 +- parity-util-mem/src/malloc_size.rs | 89 ++++++++++++++++++++ 9 files changed, 122 insertions(+), 127 deletions(-) create mode 100644 parity-util-mem/src/ethereum_impls.rs delete mode 100644 parity-util-mem/src/impls.rs diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 74c81818c..2ba57bbc4 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.3.0" +version = "0.3.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,7 +8,7 @@ license = "GPL-3.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.4" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false, features = ["std"] } parking_lot = "0.9.0" kvdb = { version = "0.3", path = "../kvdb" } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 778bc0736..657c0392c 100644 --- a/kvdb-rocksdb/Cargo.toml 
+++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.4.0" +version = "0.4.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -22,7 +22,7 @@ parking_lot = "0.9.0" regex = "1.3.1" rocksdb = { version = "0.13", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.4" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 985e453c2..ea77e5855 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.3.0" +version = "0.3.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -16,7 +16,7 @@ kvdb-memorydb = { version = "0.3", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.4" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false } [dependencies.web-sys] version = "0.3.31" diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 46aad54c6..c176644ac 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.3.0" +version = "0.3.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -10,4 +10,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } -parity-util-mem = { path = "../parity-util-mem", version = "0.4" } +parity-util-mem = { path = "../parity-util-mem", version = "0.4", 
default-features = false } diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index b9dc68913..9aee6db3e 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -35,7 +35,7 @@ version = "0.3.2" optional = true [features] -default = ["std", "ethereum-impls", "lru", "hashbrown"] +default = ["std", "ethereum-impls", "lru", "hashbrown", "smallvec"] std = ["parking_lot"] # use dlmalloc as global allocator dlmalloc-global = ["dlmalloc", "estimate-heapsize"] @@ -46,6 +46,6 @@ jemalloc-global = ["jemallocator"] # use mimalloc as global allocator mimalloc-global = ["mimallocator", "mimalloc-sys"] # implement additional types -ethereum-impls = ["ethereum-types", "smallvec"] +ethereum-impls = ["ethereum-types"] # Full estimate: no call to allocator estimate-heapsize = [] diff --git a/parity-util-mem/src/ethereum_impls.rs b/parity-util-mem/src/ethereum_impls.rs new file mode 100644 index 000000000..243230106 --- /dev/null +++ b/parity-util-mem/src/ethereum_impls.rs @@ -0,0 +1,22 @@ +// Copyright 2015-2019 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Implementation of `MallocSize` for common ethereum types: fixed hashes +//! and uints. 
+ +use ethereum_types::{Bloom, H128, H160, H256, H264, H32, H512, H520, H64, U128, U256, U512, U64}; + +malloc_size_of_is_0!(U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom); diff --git a/parity-util-mem/src/impls.rs b/parity-util-mem/src/impls.rs deleted file mode 100644 index 4124b132a..000000000 --- a/parity-util-mem/src/impls.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Implementation of `MallocSize` for common types : -//! - ethereum types uint and fixed hash. -//! 
- smallvec arrays of sizes 32, 36 - -use super::{MallocSizeOf, MallocSizeOfOps}; - -use ethereum_types::{Bloom, H128, H160, H256, H264, H32, H512, H520, H64, U128, U256, U512, U64}; -use smallvec::SmallVec; - -#[cfg(not(feature = "std"))] -use core as std; - -#[cfg(feature = "std")] -malloc_size_of_is_0!(std::time::Instant); -malloc_size_of_is_0!(std::time::Duration); - -malloc_size_of_is_0!(U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom); - -malloc_size_of_is_0!( - [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 9], [u8; 10], [u8; 11], [u8; 12], - [u8; 13], [u8; 14], [u8; 15], [u8; 16], [u8; 17], [u8; 18], [u8; 19], [u8; 20], [u8; 21], [u8; 22], [u8; 23], - [u8; 24], [u8; 25], [u8; 26], [u8; 27], [u8; 28], [u8; 29], [u8; 30], [u8; 31], [u8; 32] -); - -macro_rules! impl_smallvec { - ($size: expr) => { - impl MallocSizeOf for SmallVec<[T; $size]> - where - T: MallocSizeOf, - { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = if self.spilled() { self.capacity() * core::mem::size_of::() } else { 0 }; - for elem in self.iter() { - n += elem.size_of(ops); - } - n - } - } - }; -} - -impl_smallvec!(32); // kvdb uses this -impl_smallvec!(36); // trie-db uses this - -#[cfg(test)] -mod tests { - use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; - use smallvec::SmallVec; - use std::mem; - impl_smallvec!(3); - - #[test] - fn test_smallvec_stack_allocated_type() { - let mut v: SmallVec<[u8; 3]> = SmallVec::new(); - let mut ops = new_malloc_size_ops(); - assert_eq!(v.size_of(&mut ops), 0); - v.push(1); - v.push(2); - v.push(3); - assert_eq!(v.size_of(&mut ops), 0); - assert!(!v.spilled()); - v.push(4); - assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); - assert_eq!(v.size_of(&mut ops), 4); // 4 u8s on the heap - } - - #[test] - fn test_smallvec_boxed_stack_allocated_type() { - let mut v: SmallVec<[Box; 3]> = SmallVec::new(); 
- let mut ops = new_malloc_size_ops(); - assert_eq!(v.size_of(&mut ops), 0); - v.push(Box::new(1u8)); - v.push(Box::new(2u8)); - v.push(Box::new(3u8)); - assert!(v.size_of(&mut ops) >= 3); - assert!(!v.spilled()); - v.push(Box::new(4u8)); - assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); - let mut ops = new_malloc_size_ops(); - let expected_min_allocs = mem::size_of::>() * 4 + 4; - assert!(v.size_of(&mut ops) >= expected_min_allocs); - } - - #[test] - fn test_smallvec_heap_allocated_type() { - let mut v: SmallVec<[String; 3]> = SmallVec::new(); - let mut ops = new_malloc_size_ops(); - assert_eq!(v.size_of(&mut ops), 0); - v.push("COW".into()); - v.push("PIG".into()); - v.push("DUCK".into()); - assert!(!v.spilled()); - assert!(v.size_of(&mut ops) >= "COW".len() + "PIG".len() + "DUCK".len()); - v.push("ÖWL".into()); - assert!(v.spilled()); - let mut ops = new_malloc_size_ops(); - let expected_min_allocs = mem::size_of::() * 4 + "ÖWL".len() + "COW".len() + "PIG".len() + "DUCK".len(); - assert!(v.size_of(&mut ops) >= expected_min_allocs); - } -} diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 2d45cf9b4..ddd8f1fd5 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -66,7 +66,7 @@ pub mod sizeof; mod malloc_size; #[cfg(feature = "ethereum-impls")] -pub mod impls; +pub mod ethereum_impls; pub use allocators::MallocSizeOfExt; pub use malloc_size::{MallocSizeOf, MallocSizeOfOps}; diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index b9761527f..2f180a676 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -639,3 +639,92 @@ where n } } + +malloc_size_of_is_0!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 9], [u8; 10], [u8; 11], [u8; 12], + [u8; 13], [u8; 14], [u8; 15], [u8; 16], [u8; 17], [u8; 18], [u8; 19], [u8; 20], [u8; 21], [u8; 22], [u8; 23], + [u8; 24], [u8; 
25], [u8; 26], [u8; 27], [u8; 28], [u8; 29], [u8; 30], [u8; 31], [u8; 32] +); + +macro_rules! impl_smallvec { + ($size: expr) => { + #[cfg(feature = "smallvec")] + impl MallocSizeOf for smallvec::SmallVec<[T; $size]> + where + T: MallocSizeOf, + { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = if self.spilled() { self.capacity() * core::mem::size_of::() } else { 0 }; + for elem in self.iter() { + n += elem.size_of(ops); + } + n + } + } + }; +} + +impl_smallvec!(32); // kvdb uses this +impl_smallvec!(36); // trie-db uses this + +#[cfg(feature = "std")] +malloc_size_of_is_0!(std::time::Instant); +#[cfg(feature = "std")] +malloc_size_of_is_0!(std::time::Duration); + +#[cfg(all(test, feature = "std"))] // tests are using std implementations +mod tests { + use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; + use smallvec::SmallVec; + use std::mem; + impl_smallvec!(3); + + #[test] + fn test_smallvec_stack_allocated_type() { + let mut v: SmallVec<[u8; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push(1); + v.push(2); + v.push(3); + assert_eq!(v.size_of(&mut ops), 0); + assert!(!v.spilled()); + v.push(4); + assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); + assert_eq!(v.size_of(&mut ops), 4); // 4 u8s on the heap + } + + #[test] + fn test_smallvec_boxed_stack_allocated_type() { + let mut v: SmallVec<[Box; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push(Box::new(1u8)); + v.push(Box::new(2u8)); + v.push(Box::new(3u8)); + assert!(v.size_of(&mut ops) >= 3); + assert!(!v.spilled()); + v.push(Box::new(4u8)); + assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); + let mut ops = new_malloc_size_ops(); + let expected_min_allocs = mem::size_of::>() * 4 + 4; + assert!(v.size_of(&mut ops) >= expected_min_allocs); + } + + 
#[test] + fn test_smallvec_heap_allocated_type() { + let mut v: SmallVec<[String; 3]> = SmallVec::new(); + let mut ops = new_malloc_size_ops(); + assert_eq!(v.size_of(&mut ops), 0); + v.push("COW".into()); + v.push("PIG".into()); + v.push("DUCK".into()); + assert!(!v.spilled()); + assert!(v.size_of(&mut ops) >= "COW".len() + "PIG".len() + "DUCK".len()); + v.push("ÖWL".into()); + assert!(v.spilled()); + let mut ops = new_malloc_size_ops(); + let expected_min_allocs = mem::size_of::() * 4 + "ÖWL".len() + "COW".len() + "PIG".len() + "DUCK".len(); + assert!(v.size_of(&mut ops) >= expected_min_allocs); + } +} From c6119b7dbeb0ca398d7f6908ca7420c0092d5b71 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 6 Jan 2020 14:41:58 +0300 Subject: [PATCH 067/359] update parity-util-mem (#309) --- parity-util-mem/CHANGELOG.md | 3 +++ parity-util-mem/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 4330e0304..8a24ebce3 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.1] - 2020-01-06 +- Implementation of `MallocSizeOf` for SmallVec no longer requires ethereum `ethereum-impls` feature. [#307](https://github.com/paritytech/parity-common/pull/307) + ## [0.4.0] - 2020-01-01 - Added implementation of `MallocSizeOf` for non-std `hashbrown::HashMap` and `lru::LRUMap`. 
[#293](https://github.com/paritytech/parity-common/pull/293) - Introduced our own version of `#[derive(MallocSizeOf)]` [#291](https://github.com/paritytech/parity-common/pull/291) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 9aee6db3e..baf11981e 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.4.0" +version = "0.4.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" From 687c04d466d0a077d4a11dce601f95a4d0698397 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 15 Jan 2020 00:24:58 +0100 Subject: [PATCH 068/359] keccak-hash: switch benches to criterion (#315) * keccak-hash: upgrade benches to criterion * ci: cargo check --all --benches * cargo fix --edition-idioms * cargo fmt * upgrade docs to 2018 edition * Revert "cargo fix --edition-idioms" This reverts commit de30cd7c8284890d62cc5b0b5438a32b3116a477. 
* fixed-hash: update README to 2018 edition * Update keccak-hash/benches/keccak_256.rs Co-Authored-By: David Co-authored-by: David --- .travis.yml | 1 + ethbloom/src/lib.rs | 69 ++++++------ fixed-hash/README.md | 5 +- fixed-hash/src/hash.rs | 26 ++--- fixed-hash/src/lib.rs | 3 - keccak-hash/Cargo.toml | 1 + keccak-hash/benches/keccak_256.rs | 47 ++++---- kvdb-rocksdb/Cargo.toml | 1 + parity-crypto/benches/bench.rs | 5 +- primitive-types/impls/serde/src/serialize.rs | 4 +- rlp/src/lib.rs | 24 ++-- rlp/src/stream.rs | 110 ++++++++----------- triehash/src/lib.rs | 61 ++++------ 13 files changed, 145 insertions(+), 212 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3b9bbf6e6..bf3a27276 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,6 +36,7 @@ script: cargo fmt -- --check; fi - cargo check --all --tests + - cargo check --all --benches - cargo build --all - cargo test --all --exclude uint --exclude fixed-hash - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 9ef11e3ee..49a18214f 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -1,45 +1,42 @@ //! -//! ```rust -//! extern crate ethbloom; -//! #[macro_use] extern crate hex_literal; +//! ``` +//! use hex_literal::hex; //! use ethbloom::{Bloom, Input}; //! -//! fn main() { -//! use std::str::FromStr; -//! let bloom = Bloom::from_str( -//! "00000000000000000000000000000000\ -//! 00000000100000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000002020000000000000000000000\ -//! 00000000000000000000000800000000\ -//! 10000000000000000000000000000000\ -//! 00000000000000000000001000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 00000000000000000000000000000000\ -//! 
00000000000000000000000000000000" -//! ).unwrap(); -//! let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); -//! let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); +//! use std::str::FromStr; +//! let bloom = Bloom::from_str( +//! "00000000000000000000000000000000\ +//! 00000000100000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000002020000000000000000000000\ +//! 00000000000000000000000800000000\ +//! 10000000000000000000000000000000\ +//! 00000000000000000000001000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000\ +//! 00000000000000000000000000000000" +//! ).unwrap(); +//! let address = hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106"); +//! let topic = hex!("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"); //! -//! let mut my_bloom = Bloom::default(); -//! assert!(!my_bloom.contains_input(Input::Raw(&address))); -//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); +//! let mut my_bloom = Bloom::default(); +//! assert!(!my_bloom.contains_input(Input::Raw(&address))); +//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); //! -//! my_bloom.accrue(Input::Raw(&address)); -//! assert!(my_bloom.contains_input(Input::Raw(&address))); -//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); +//! my_bloom.accrue(Input::Raw(&address)); +//! assert!(my_bloom.contains_input(Input::Raw(&address))); +//! assert!(!my_bloom.contains_input(Input::Raw(&topic))); //! -//! my_bloom.accrue(Input::Raw(&topic)); -//! assert!(my_bloom.contains_input(Input::Raw(&address))); -//! assert!(my_bloom.contains_input(Input::Raw(&topic))); -//! assert_eq!(my_bloom, bloom); -//! } +//! my_bloom.accrue(Input::Raw(&topic)); +//! 
assert!(my_bloom.contains_input(Input::Raw(&address))); +//! assert!(my_bloom.contains_input(Input::Raw(&topic))); +//! assert_eq!(my_bloom, bloom); //! ``` //! diff --git a/fixed-hash/README.md b/fixed-hash/README.md index 19f4c79f9..c07db2f23 100644 --- a/fixed-hash/README.md +++ b/fixed-hash/README.md @@ -7,7 +7,7 @@ Provides macros to construct custom fixed-size hash types. Simple 256 bit (32 bytes) hash type. ```rust -#[macro_use] extern crate fixed_hash; +use fixed_hash::construct_fixed_hash; construct_fixed_hash! { /// My 256 bit hash type. @@ -30,9 +30,6 @@ assert_eq!(H160::from(H256::zero()), H160::zero()); It is possible to add attributes to your types, for example to make them serializable. ```rust -extern crate serde; -#[macro_use] extern crate serde_derive; - construct_fixed_hash!{ /// My serializable hash type. #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 25098b5ad..ea2210076 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -13,38 +13,30 @@ /// Create a public unformatted hash type with 32 bytes size. /// /// ``` -/// # #[macro_use] extern crate fixed_hash; +/// use fixed_hash::construct_fixed_hash; +/// /// construct_fixed_hash!{ pub struct H256(32); } -/// # fn main() { -/// # assert_eq!(std::mem::size_of::(), 32); -/// # } +/// assert_eq!(std::mem::size_of::(), 32); /// ``` /// /// With additional attributes and doc comments. /// /// ``` -/// # #[macro_use] extern crate fixed_hash; -/// // Add the below two lines to import serde and its derive -/// // extern crate serde; -/// // #[macro_use] extern crate serde_derive; +/// use fixed_hash::construct_fixed_hash; /// construct_fixed_hash!{ /// /// My unformatted 160 bytes sized hash type. 
/// #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] /// pub struct H160(20); /// } -/// # fn main() { -/// # assert_eq!(std::mem::size_of::(), 20); -/// # } +/// assert_eq!(std::mem::size_of::(), 20); /// ``` /// /// The visibility modifier is optional and you can create a private hash type. /// /// ``` -/// # #[macro_use] extern crate fixed_hash; +/// use fixed_hash::construct_fixed_hash; /// construct_fixed_hash!{ struct H512(64); } -/// # fn main() { -/// # assert_eq!(std::mem::size_of::(), 64); -/// # } +/// assert_eq!(std::mem::size_of::(), 64); /// ``` #[macro_export(local_inner_macros)] macro_rules! construct_fixed_hash { @@ -761,15 +753,13 @@ macro_rules! impl_ops_for_hash { /// # Example /// /// ``` -/// #[macro_use] extern crate fixed_hash; +/// use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; /// construct_fixed_hash!{ struct H160(20); } /// construct_fixed_hash!{ struct H256(32); } /// impl_fixed_hash_conversions!(H256, H160); /// // now use it! -/// # fn main() { /// assert_eq!(H256::from(H160::zero()), H256::zero()); /// assert_eq!(H160::from(H256::zero()), H160::zero()); -/// # } /// ``` #[macro_export(local_inner_macros)] macro_rules! 
impl_fixed_hash_conversions { diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index bfc210a84..a3e7af6f7 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -54,9 +54,6 @@ pub use rand; #[doc(hidden)] pub use quickcheck; -#[cfg(test)] -extern crate rand_xorshift; - #[macro_use] mod hash; diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 14b6b19ba..3d7d88943 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -14,6 +14,7 @@ primitive-types = { path = "../primitive-types", version = "0.6", default-featur [dev-dependencies] tempdir = "0.3.7" +criterion = "0.3.0" [features] default = ["std"] diff --git a/keccak-hash/benches/keccak_256.rs b/keccak-hash/benches/keccak_256.rs index 5cf5f2526..5c5794bf5 100644 --- a/keccak-hash/benches/keccak_256.rs +++ b/keccak-hash/benches/keccak_256.rs @@ -14,37 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#![feature(test)] - -extern crate test; - +use criterion::{black_box, criterion_group, criterion_main, Criterion}; use keccak_hash::keccak; -use test::Bencher; -#[bench] -fn bench_keccak_256_with_empty_input(b: &mut Bencher) { +criterion_group!(keccak_256, keccak_256_with_empty_input, keccak_256_with_typical_input, keccak_256_with_large_input,); +criterion_main!(keccak_256); + +pub fn keccak_256_with_empty_input(c: &mut Criterion) { let empty = [0u8; 0]; - b.bytes = empty.len() as u64; - b.iter(|| { - let _out = keccak(empty); - }) + c.bench_function("keccak_256_with_empty_input", |b| { + b.iter(|| { + let _out = keccak(black_box(empty)); + }) + }); } -#[bench] -fn bench_keccak_256_with_typical_input(b: &mut Bencher) { - let data: Vec = From::from("some medum length string with important information"); - b.bytes = data.len() as u64; - b.iter(|| { - let _out = keccak(&data); - }) +pub fn keccak_256_with_typical_input(c: &mut Criterion) { + let data: Vec = From::from("some medium length string with important 
information"); + c.bench_function("keccak_256_with_typical_input", |b| { + b.iter(|| { + let _out = keccak(black_box(&data)); + }) + }); } -#[bench] -fn bench_keccak_256_with_large_input(b: &mut Bencher) { +pub fn keccak_256_with_large_input(c: &mut Criterion) { // 4096 chars let data: Vec = From::from("IGxcKBr1Qp7tuqtpSVhAbvt7UgWLEi7mCA6Wa185seLSIJLFS8K1aAFO9AwtO9b3n9SM3Qg136JMmy9Mj9gZ84IaUm8XioPtloabFDU5ZR1wvauJT6jNTkvBVBpUigIsyU7C1u3s99vKP64LpXqvo1hwItZKtISxmUAgzzjv5q14V4G9bkKAnmc4M5xixgLsDGZmnj6HcOMY3XRkWtxN3RscSKwPA0bfpgtz27ZVHplbXwloYRgRLpjRhZJc7sqO8RFnTHKasVkxVRcUoDBvWNJK27TbLvQQcfxETI2Q1H6c2cBAchi8unSiuxqy5rIvVxcl9rsmmRY4IXLEG9qKntUGbiIRLjEffIP9ODoWog0GbWLmMtfvtf24hWVwXz6Ap5oUAR0kLgb7HYIYrOwKjvfV25iEF7GW8cjhl8yowXx1zcgW4t6NJNqJlGzRKx8MvRWQXvHz8h8JxcHl7S64i6PAkxI9eCLXLvs8cpbEQQHt05Zu6GKm6IInjc9mSh52WFuGhgjbno69XzfkBufJs6c9tZuBf6ErVPj4UxmT82ajCruDusk79Tlvb8oQMLjoplQc1alQaLQwSsMac9iVp9MiE3PeYnTTepJ1V10tp79fciDAnNPJgPcRfDYv0REcSFgR9Q7yWhbpPpyBjO7HwOykDQVGtV0ZbDFrFRygLAXagAIkOPc9HDfcBNID1Q2MGk8ijVWMyvmGz1wzbpNfFcQaSOm8olhwoLyHUGvkyXegh44iNsPBUvSicNxTTDowtMqO5azleuWEjzxCobYbASDopvl6JeJjRtEBBO5YCQJiHsYjlXh9QR5Q543GsqhzRLgcHNRSZYLMZqDmIABXZi8VRNJMZyWXDRKHOGDmcHWe55uZomW6FnyU0uSRKxxz66K0JWfxuFzzxAR0vR4ZZCTemgDRQuDwL1loC3KUMjDpU13jUgoPc4UJUVfwQ4f4BUY3X51Cfw9FLw4oX39KoFoiCP2Z6z27gZUY1IlE59WoXGLj4KjTp4C16ZihG080gfDIWlXnDEk3VwBuBFyKWARB63sGLrGnn27b1gHWMaop6sPvkQgWxkEKIqsxDIvXLZJg2s23V8Gqtt0FeA7R3RCvBysF4jNjQ7NiQTIQWQZ8G9gO4mEsftolSZv6FlSpNeBKIIwYWSO2R6vkgeiz06euE9bwwnenOjwPNGTGk8WHIOZBJ1hIP0ejVU2i2ca9ON0phSAnewqjo5W3PtZf2Q7mDvp9imuVWoy4t8XcZq8I2Un9jVjes9Xi0FLN2t71vLFWLWZmGDzwXxpqEgkARS1WjtJoYXCBmRnXEPj6jQfwMZWKPYSIrmOogxMVoWvA8wrof6utfJna9JezyTnrBJSCuGTSNmwwAXRLoFYxF1RITyN8mI2KmHSfvLXBrbE6kmAkjsm4XJb6kria7oUQQ1gzJuCyB7oNHjZTBFNhNa7VeQ1s1xLOwZXLOAjZ4MDTYKnF7giGJGyswb5KQxkOV9orbuAu6pJsjtql6h1UD3BcNUkG3oz8kJNepbuCN3vNCJcZOX1VrQi0PWkDwyvECrQ2E1CgbU6GpWatpg2sCTpo9W62pCcWBK2FKUFWqU3qo2T7T1Mk2ZtM6hE9I8op0M7xlGE91Mn7ea6aq93MWp7nvFlBvbaMIoeU4MpDx0BeOSkROY03ZBJ0x7K8nJrNUhAtvxp17c9
oFk0VxLiuRbAAcwDUormOmpVXZNIcqnap4twEVYaSIowfcNojyUSrFL5nPc8ZG93WgNNl9rpUPZhssVml3DvXghI80A9SW3QauzohTQAX2bkWelFBHnuG2LKrsJ8en51N6CkjcS5b87y1DVMZELcZ1n5s8PCAA1wyn7OSZlgw00GRzch1YwMoHzBBgIUtMO9HrMyuhgqIPJP7KcKbQkKhtvBXKplX8SCfSlOwUkLwHNKm3HYVE0uVfJ91NAsUrGoCOjYiXYpoRT8bjAPWTm6fDlTq2sbPOyTMoc4xRasmiOJ7B0PT6UxPzCPImM4100sPFxp7Kofv4okKZWTPKTefeYiPefI3jRgfDtEIP9E6a35LZD75lBNMXYlAqL3qlnheUQD1WQimFTHiDsW6bmURptNvtkMjEXzXzpWbnyxBskUGTvP2YQjtSAhWliDXkv6t1x71cYav7TQbqvbIzMRQQsguSGYMbs8YIC4DC9ep5reWAfanlTxcxksbEhQ7FGzXOvcufeGnDl2C85gWfryVzwN7kOZiSEktFMOQ1ngRC23y1fCOiHQVQJ2nLnaW7GILb9wkN1mBTRuHsOefRJST0TnRxcn4bBq4MIibIitVyjPRy7G5XvPEcL4pFaW1HCPGm6pUOEEwTer32JObNGCyTFB1BI2cRLJu5BHPjgG3mmb0gGkGlIfh8D2b2amogpivqEn2r9Y1KOKQ8ufJvG2mYfkevco9DuEZ9Nmzkm6XkCTZaFMNHqbfQaKqsEYK7i2N1KfkBct1leW2H9MQ9QO7AHCqXHK47b1kWVIm6pSJA1yV4funzCqXnIJCEURQgHiKf38YpN7ylLhe1J4UvSG3KeesZNeFFIZOEP9HZUSFMpnN1MOrwejojK0D4qzwucYWtXrTQ8I7UP5QhlijIsCKckUa9C1Osjrq8cgSclYNGt19wpy0onUbX1rOQBUlAAUJs4CyXNU0wmVUjw7tG1LUC8my4s9KZDUj4R5UcPz3VaZRrx1RqYu6YxjroJW70I1LyG4WEiQbOkCoLmaiWo9WzbUS2cErlOo2RPymlkWHxbNnZawX2Bc872ivRHSWqNpRHyuR5QewXmcyghH3EhESBAxTel5E2xuQXfLCEVK0kEk0Mj22KPsckKKyH7sVYC1F4YItQh5hj9Titb7KflQb9vnXQ44UHxY3zBhTQT5PSYv1Kv8HxXCsnpmhZCiBru16iX9oEB33icBVB2KKcZZEEKnCGPVxJlM9RTlyNyQmjHf7z4GeTDuMAUrsMO31WvgZBnWcAOtn6ulBTUCAaqxJiWqzlMx2FSANAlyAjAxqzmQjzPLvQRjskUnBFN3woKB1m2bSo2c5thwA1fKiPvN5LW8tl1rnfNy3rJ0GJpK8nZjkzHMztYrKYAe56pX4SvplpTyibTIiRXLyEVsmuByTHCZhO3fvGoFsav3ZuRhe9eAAWeqAh13eKDTcA0ufME3ZnmJheXEZ3OwrxnFjSf3U0clkWYVont3neh77ODKHhYnX0bOmnJJlr4RqFoLBitskY0kcGMKcZlaej21SENjDcFgaka3CfHbAH5vIFqnoX1JZrZPkQ65PZqQWImP79U3gXWKvz96lElyJZAFqn0Mbltllqw4MhlI766AvHraOmMsJoNvjv1QR7pCSnC0iX6nbqW1eVPaUSZDuZRtRIxfLA8HC9VbxufT2KZV3qG0l7wrZna5Di2MNcBE9uthuVLZcqp8vCmEhINDhRRlipR7tC2iRBHecS5WtxBCpbEm1y1kgNG5o60UKgAswxxuJ3RQ9Y49mPIApBMmp4LFpuKRfcrZb4UJnCfR3pNbQ70nnZ6Be2M7tuJUCoFfHrhqHXNz5A0uWMgxUS50c60zLl6QAELxHaCGba4WCMOHIo5nSKcUuYtDyDoDlrezALW5mZR4PRPRxnjrXxbJI14qrpymRReC3QgFDJp6sT5TLwvSHaavPlEbt2Eu0Kh5SXklGHXP9YuF3glGuJzSob3N
akW1RXF5786U1MHhtJby64LyGWvNn4QXie3VjeL3QQu4C9crEAxSSiOJOfnL3DYIVOY4ipUkKFlF7Rp2q6gZazDvcUCp1cbcr7T7B4s22rXzjN7mHYWOyWuZGwlImeorY3aVKi7BaXbhgOFw6BUmIc1HeGFELHIEnPE9MwOjZam3LOm0rhBHlvJJZkXvJKmDUJrGlyqC5GtC5lDWLfXewyDWDqq7PY0atVQily5GWqib6wub6u6LZ3HZDNP8gK64Nf4kC259AE4V2hCohDnSsXAIoOkehwXyp6CkDT42NJb6sXHUv2N6cm292MiKA22PKWrwUGsan599KI2V67YRDfcfiB4ZHRDiSe62MBE0fGLIgXLIWw1xTWYbPQ9YAj3xovBvmewbJ1De4k6uS"); - b.bytes = data.len() as u64; - b.iter(|| { - let _out = keccak(&data); - }) + c.bench_function("keccak_256_with_large_input", |b| { + b.iter(|| { + let _out = keccak(black_box(&data)); + }) + }); } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 657c0392c..d7cef5999 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -27,6 +27,7 @@ parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-featur [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" +ethereum-types = { path = "../ethereum-types" } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } rand = "0.7.2" tempdir = "0.3.7" diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs index e0830bb98..6c13aa369 100644 --- a/parity-crypto/benches/bench.rs +++ b/parity-crypto/benches/bench.rs @@ -14,11 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-#[macro_use] -extern crate criterion; - use crate::parity_crypto::publickey::Generator; -use criterion::{Bencher, Criterion}; +use criterion::{criterion_group, criterion_main, Bencher, Criterion}; criterion_group!(benches, input_len, ecdh_agree,); diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 01e85c036..431a56ec7 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -272,9 +272,7 @@ where #[cfg(test)] mod tests { use super::*; - extern crate serde_derive; - - use self::serde_derive::{Deserialize, Serialize}; + use serde_derive::{Deserialize, Serialize}; #[derive(Serialize, Deserialize)] struct Bytes(#[serde(with = "super")] Vec); diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index ab386e689..a4c66e2ac 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -59,14 +59,10 @@ pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1]; /// Shortcut function to decode trusted rlp /// -/// ```rust -/// extern crate rlp; -/// -/// fn main () { -/// let data = vec![0x83, b'c', b'a', b't']; -/// let animal: String = rlp::decode(&data).expect("could not decode"); -/// assert_eq!(animal, "cat".to_owned()); -/// } +/// ``` +/// let data = vec![0x83, b'c', b'a', b't']; +/// let animal: String = rlp::decode(&data).expect("could not decode"); +/// assert_eq!(animal, "cat".to_owned()); /// ``` pub fn decode(bytes: &[u8]) -> Result where @@ -86,14 +82,10 @@ where /// Shortcut function to encode structure into rlp. 
/// -/// ```rust -/// extern crate rlp; -/// -/// fn main () { -/// let animal = "cat"; -/// let out = rlp::encode(&animal); -/// assert_eq!(out, vec![0x83, b'c', b'a', b't']); -/// } +/// ``` +/// let animal = "cat"; +/// let out = rlp::encode(&animal); +/// assert_eq!(out, vec![0x83, b'c', b'a', b't']); /// ``` pub fn encode(object: &E) -> Vec where diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 6dcf1500e..851b845b9 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -53,16 +53,12 @@ impl RlpStream { /// Apends null to the end of stream, chainable. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append_empty_data().append_empty_data(); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append_empty_data().append_empty_data(); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc2, 0x80, 0x80]); /// ``` pub fn append_empty_data(&mut self) -> &mut Self { // self push raw item @@ -94,16 +90,12 @@ impl RlpStream { /// Appends value to the end of stream, chainable. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append(&"cat").append(&"dog"); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append(&"cat").append(&"dog"); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); /// ``` pub fn append(&mut self, value: &E) -> &mut Self where @@ -119,16 +111,12 @@ impl RlpStream { /// Appends iterator to the end of stream, chainable. 
/// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append(&"cat").append_iter("dog".as_bytes().iter().cloned()); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append(&"cat").append_iter("dog".as_bytes().iter().cloned()); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); /// ``` pub fn append_iter(&mut self, value: I) -> &mut Self where @@ -167,17 +155,13 @@ impl RlpStream { /// Declare appending the list of given size, chainable. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.begin_list(2).append(&"cat").append(&"dog"); - /// stream.append(&""); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.begin_list(2).append(&"cat").append(&"dog"); + /// stream.append(&""); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xca, 0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g', 0x80]); /// ``` pub fn begin_list(&mut self, len: usize) -> &mut RlpStream { self.finished_list = false; @@ -249,18 +233,15 @@ impl RlpStream { /// Clear the output stream so far. 
/// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(3); - /// stream.append(&"cat"); - /// stream.clear(); - /// stream.append(&"dog"); - /// let out = stream.out(); - /// assert_eq!(out, vec![0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(3); + /// stream.append(&"cat"); + /// stream.clear(); + /// stream.append(&"dog"); + /// let out = stream.out(); + /// assert_eq!(out, vec![0x83, b'd', b'o', b'g']); + /// ``` pub fn clear(&mut self) { // clear bytes self.buffer.clear(); @@ -271,19 +252,16 @@ impl RlpStream { /// Returns true if stream doesnt expect any more items. /// - /// ```rust - /// extern crate rlp; - /// use rlp::*; - /// - /// fn main () { - /// let mut stream = RlpStream::new_list(2); - /// stream.append(&"cat"); - /// assert_eq!(stream.is_finished(), false); - /// stream.append(&"dog"); - /// assert_eq!(stream.is_finished(), true); - /// let out = stream.out(); - /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); - /// } + /// ``` + /// use rlp::RlpStream; + /// let mut stream = RlpStream::new_list(2); + /// stream.append(&"cat"); + /// assert_eq!(stream.is_finished(), false); + /// stream.append(&"dog"); + /// assert_eq!(stream.is_finished(), true); + /// let out = stream.out(); + /// assert_eq!(out, vec![0xc8, 0x83, b'c', b'a', b't', 0x83, b'd', b'o', b'g']); + /// ``` pub fn is_finished(&self) -> bool { self.unfinished_lists.is_empty() } diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs index 964e7e14f..81a005826 100644 --- a/triehash/src/lib.rs +++ b/triehash/src/lib.rs @@ -47,20 +47,15 @@ fn shared_prefix_len(first: &[T], second: &[T]) -> usize { /// Generates a trie root hash for a vector of values /// -/// ```rust -/// extern crate triehash; -/// extern crate keccak_hasher; -/// extern crate ethereum_types; -/// #[macro_use] extern crate hex_literal; +/// ``` +/// use 
hex_literal::hex; /// use ethereum_types::H256; /// use triehash::ordered_trie_root; /// use keccak_hasher::KeccakHasher; /// -/// fn main() { -/// let v = &["doe", "reindeer"]; -/// let root = H256::from(hex!("e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3")); -/// assert_eq!(ordered_trie_root::(v), root.as_ref()); -/// } +/// let v = &["doe", "reindeer"]; +/// let root = H256::from(hex!("e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3")); +/// assert_eq!(ordered_trie_root::(v), root.as_ref()); /// ``` pub fn ordered_trie_root(input: I) -> H::Out where @@ -74,25 +69,20 @@ where /// Generates a trie root hash for a vector of key-value tuples /// -/// ```rust -/// extern crate triehash; -/// extern crate ethereum_types; -/// extern crate keccak_hasher; -/// #[macro_use] extern crate hex_literal; +/// ``` +/// use hex_literal::hex; /// use triehash::trie_root; /// use ethereum_types::H256; /// use keccak_hasher::KeccakHasher; /// -/// fn main() { -/// let v = vec![ -/// ("doe", "reindeer"), -/// ("dog", "puppy"), -/// ("dogglesworth", "cat"), -/// ]; +/// let v = vec![ +/// ("doe", "reindeer"), +/// ("dog", "puppy"), +/// ("dogglesworth", "cat"), +/// ]; /// -/// let root = H256::from(hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")); -/// assert_eq!(trie_root::(v), root.as_ref()); -/// } +/// let root = H256::from(hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")); +/// assert_eq!(trie_root::(v), root.as_ref()); /// ``` pub fn trie_root(input: I) -> H::Out where @@ -126,25 +116,20 @@ where /// Generates a key-hashed (secure) trie root hash for a vector of key-value tuples. 
/// -/// ```rust -/// extern crate triehash; -/// extern crate keccak_hasher; -/// extern crate ethereum_types; -/// #[macro_use] extern crate hex_literal; +/// ``` +/// use hex_literal::hex; /// use ethereum_types::H256; /// use triehash::sec_trie_root; /// use keccak_hasher::KeccakHasher; /// -/// fn main() { -/// let v = vec![ -/// ("doe", "reindeer"), -/// ("dog", "puppy"), -/// ("dogglesworth", "cat"), -/// ]; +/// let v = vec![ +/// ("doe", "reindeer"), +/// ("dog", "puppy"), +/// ("dogglesworth", "cat"), +/// ]; /// -/// let root = H256::from(hex!("d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585")); -/// assert_eq!(sec_trie_root::(v), root.as_ref()); -/// } +/// let root = H256::from(hex!("d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585")); +/// assert_eq!(sec_trie_root::(v), root.as_ref()); /// ``` pub fn sec_trie_root(input: I) -> H::Out where From a163bead36bb3003ab743742fa0d9b0698c3dada Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 15 Jan 2020 14:01:15 +0100 Subject: [PATCH 069/359] README: fix appveyor badge (#316) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b9a2936ac..b4647e7e9 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [travis-image]: https://travis-ci.org/paritytech/parity-common.svg?branch=master [travis-url]: https://travis-ci.org/paritytech/parity-common -[appveyor-image]: https://ci.appveyor.com/api/projects/status/github/paritytech/parity-common/branch/master?svg=true +[appveyor-image]: https://ci.appveyor.com/api/projects/status/github/paritytech/parity-common?branch=master&svg=true [appveyor-url]: https://ci.appveyor.com/project/paritytech/parity-common/branch/master # parity-common From 97f682ae24a6ce7c1e76ebf1c708443b47b8b7fb Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Sun, 26 Jan 2020 20:33:43 +0300 Subject: [PATCH 070/359] uint: make zero const fn (#318) --- uint/src/uint.rs | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 64990bf5c..b15726b72 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -642,8 +642,8 @@ macro_rules! construct_uint { /// Zero (additive identity) of this type. #[inline] - pub fn zero() -> Self { - From::from(0u64) + pub const fn zero() -> Self { + Self([0; $n_words]) } /// One (multiplicative identity) of this type. From de318bffc7a034455bc55e9080feb095ba7326da Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Mon, 27 Jan 2020 12:53:18 +0300 Subject: [PATCH 071/359] Expand const fn coverage (#319) * Expand const fn coverage * also const fn rlp * make more uint methods const fn * restore inline attribute --- fixed-hash/src/hash.rs | 10 +++++----- rlp/src/rlpin.rs | 8 ++++---- uint/src/uint.rs | 18 +++++++++--------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index ea2210076..09ce43d8c 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -107,19 +107,19 @@ macro_rules! construct_fixed_hash { impl $name { /// Returns a new fixed hash where all bits are set to the given byte. #[inline] - pub fn repeat_byte(byte: u8) -> $name { + pub const fn repeat_byte(byte: u8) -> $name { $name([byte; $n_bytes]) } /// Returns a new zero-initialized fixed hash. #[inline] - pub fn zero() -> $name { + pub const fn zero() -> $name { $name::repeat_byte(0u8) } /// Returns the size of this hash in bytes. #[inline] - pub fn len_bytes() -> usize { + pub const fn len_bytes() -> usize { $n_bytes } @@ -137,7 +137,7 @@ macro_rules! construct_fixed_hash { /// Extracts a reference to the byte array containing the entire fixed hash. #[inline] - pub fn as_fixed_bytes(&self) -> &[u8; $n_bytes] { + pub const fn as_fixed_bytes(&self) -> &[u8; $n_bytes] { &self.0 } @@ -149,7 +149,7 @@ macro_rules! construct_fixed_hash { /// Returns the inner bytes array. 
#[inline] - pub fn to_fixed_bytes(self) -> [u8; $n_bytes] { + pub const fn to_fixed_bytes(self) -> [u8; $n_bytes] { self.0 } diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index f1c488626..6cdfaa808 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -25,7 +25,7 @@ struct OffsetCache { } impl OffsetCache { - fn new(index: usize, offset: usize) -> OffsetCache { + const fn new(index: usize, offset: usize) -> OffsetCache { OffsetCache { index, offset } } } @@ -68,7 +68,7 @@ fn calculate_payload_info(header_bytes: &[u8], len_of_len: usize) -> Result PayloadInfo { + const fn new(header_len: usize, value_len: usize) -> PayloadInfo { PayloadInfo { header_len, value_len } } @@ -128,7 +128,7 @@ impl<'a> fmt::Display for Rlp<'a> { } impl<'a> Rlp<'a> { - pub fn new(bytes: &'a [u8]) -> Rlp<'a> { + pub const fn new(bytes: &'a [u8]) -> Rlp<'a> { Rlp { bytes, offset_cache: Cell::new(None), count_cache: Cell::new(None) } } @@ -374,7 +374,7 @@ pub struct BasicDecoder<'a> { } impl<'a> BasicDecoder<'a> { - pub fn new(rlp: &'a [u8]) -> BasicDecoder<'a> { + pub const fn new(rlp: &'a [u8]) -> BasicDecoder<'a> { BasicDecoder { rlp } } diff --git a/uint/src/uint.rs b/uint/src/uint.rs index b15726b72..7dd1bca42 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -371,7 +371,7 @@ macro_rules! construct_uint { impl $name { /// Low 2 words (u128) #[inline] - pub fn low_u128(&self) -> u128 { + pub const fn low_u128(&self) -> u128 { let &$name(ref arr) = self; ((arr[1] as u128) << 64) + arr[0] as u128 } @@ -473,14 +473,14 @@ macro_rules! construct_uint { /// Conversion to u32 #[inline] - pub fn low_u32(&self) -> u32 { + pub const fn low_u32(&self) -> u32 { let &$name(ref arr) = self; arr[0] as u32 } /// Low word (u64) #[inline] - pub fn low_u64(&self) -> u64 { + pub const fn low_u64(&self) -> u64 { let &$name(ref arr) = self; arr[0] } @@ -560,7 +560,7 @@ macro_rules! construct_uint { /// /// Panics if `index` exceeds the bit width of the number. 
#[inline] - pub fn bit(&self, index: usize) -> bool { + pub const fn bit(&self, index: usize) -> bool { let &$name(ref arr) = self; arr[index / 64] & (1 << (index % 64)) != 0 } @@ -601,7 +601,7 @@ macro_rules! construct_uint { /// /// Panics if `index` exceeds the byte width of the number. #[inline] - pub fn byte(&self, index: usize) -> u8 { + pub const fn byte(&self, index: usize) -> u8 { let &$name(ref arr) = self; (arr[index / 8] >> (((index % 8)) * 8)) as u8 } @@ -1066,18 +1066,18 @@ macro_rules! construct_uint { } #[inline(always)] - fn mul_u64(a: u64, b: u64, carry: u64) -> (u64, u64) { - let (hi, lo) = Self::split_u128(u128::from(a) * u128::from(b) + u128::from(carry)); + const fn mul_u64(a: u64, b: u64, carry: u64) -> (u64, u64) { + let (hi, lo) = Self::split_u128(a as u128 * b as u128 + carry as u128); (lo, hi) } #[inline(always)] - fn split(a: u64) -> (u64, u64) { + const fn split(a: u64) -> (u64, u64) { (a >> 32, a & 0xFFFF_FFFF) } #[inline(always)] - fn split_u128(a: u128) -> (u64, u64) { + const fn split_u128(a: u128) -> (u64, u64) { ((a >> 64) as _, (a & 0xFFFFFFFFFFFFFFFF) as _) } From 97cf7820ee7297f6a19152663046fb1226b2470d Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Feb 2020 11:10:36 +0100 Subject: [PATCH 072/359] travis: disable kvdb-web tests for chrome (#324) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index bf3a27276..a8741dccc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -54,5 +54,5 @@ script: - cd parity-util-mem/ && cargo test --no-default-features --features=dlmalloc-global && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. - - cd kvdb-web/ && wasm-pack test --headless --chrome --firefox && cd .. + - cd kvdb-web/ && wasm-pack test --headless --firefox && cd .. 
From 8440c05c286a55cc35fafa69f8decb9aca6013e1 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 4 Feb 2020 03:23:54 -0800 Subject: [PATCH 073/359] split off primitives (#323) --- parity-util-mem/Cargo.toml | 7 ++--- parity-util-mem/src/ethereum_impls.rs | 4 +-- parity-util-mem/src/lib.rs | 3 +++ parity-util-mem/src/primitives_impls.rs | 34 +++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 5 deletions(-) create mode 100644 parity-util-mem/src/primitives_impls.rs diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index baf11981e..f38ea6ac5 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -26,6 +26,7 @@ impl-trait-for-tuples = "0.1.3" smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.9.0", optional = true } +primitive-types = { version = "0.6", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } @@ -35,7 +36,7 @@ version = "0.3.2" optional = true [features] -default = ["std", "ethereum-impls", "lru", "hashbrown", "smallvec"] +default = ["std", "ethereum-impls", "lru", "hashbrown", "smallvec", "primitive-types"] std = ["parking_lot"] # use dlmalloc as global allocator dlmalloc-global = ["dlmalloc", "estimate-heapsize"] @@ -46,6 +47,6 @@ jemalloc-global = ["jemallocator"] # use mimalloc as global allocator mimalloc-global = ["mimallocator", "mimalloc-sys"] # implement additional types -ethereum-impls = ["ethereum-types"] +ethereum-impls = ["ethereum-types", "primitive-types"] # Full estimate: no call to allocator -estimate-heapsize = [] +estimate-heapsize = [] \ No newline at end of file diff --git a/parity-util-mem/src/ethereum_impls.rs b/parity-util-mem/src/ethereum_impls.rs index 243230106..4379b3b0e 100644 --- a/parity-util-mem/src/ethereum_impls.rs +++ 
b/parity-util-mem/src/ethereum_impls.rs @@ -17,6 +17,6 @@ //! Implementation of `MallocSize` for common ethereum types: fixed hashes //! and uints. -use ethereum_types::{Bloom, H128, H160, H256, H264, H32, H512, H520, H64, U128, U256, U512, U64}; +use ethereum_types::{Bloom, H128, H264, H32, H520, H64, U64}; -malloc_size_of_is_0!(U64, U128, U256, U512, H32, H64, H128, H160, H256, H264, H512, H520, Bloom); +malloc_size_of_is_0!(U64, H32, H64, H128, H264, H520, Bloom); diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index ddd8f1fd5..cdea52e42 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -68,6 +68,9 @@ mod malloc_size; #[cfg(feature = "ethereum-impls")] pub mod ethereum_impls; +#[cfg(feature = "primitive-types")] +pub mod primitives_impls; + pub use allocators::MallocSizeOfExt; pub use malloc_size::{MallocSizeOf, MallocSizeOfOps}; diff --git a/parity-util-mem/src/primitives_impls.rs b/parity-util-mem/src/primitives_impls.rs new file mode 100644 index 000000000..ab5953dcc --- /dev/null +++ b/parity-util-mem/src/primitives_impls.rs @@ -0,0 +1,34 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Implementation of `MallocSize` primitive types. 
+ +use primitive_types::{H160, H256, H512, U128, U256, U512}; + +malloc_size_of_is_0!(U128, U256, U512, H160, H256, H512); + +#[cfg(test)] +mod tests { + + use primitive_types::H256; + + #[test] + fn smoky() { + let v = vec![H256::zero(), H256::zero()]; + + assert!(crate::MallocSizeOfExt::malloc_size_of(&v) >= 64); + } +} From 31aed7d6bb214dace05cb18925e44990c188a768 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 4 Feb 2020 03:38:51 -0800 Subject: [PATCH 074/359] MallocSizeOf for BTreeSet (#325) * split off primitives * add for BTreeSet * Update parity-util-mem/src/malloc_size.rs Co-Authored-By: Andronik Ordian * cargo fmt Co-authored-by: Andronik Ordian --- parity-util-mem/src/malloc_size.rs | 36 ++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 2f180a676..864c94abd 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -455,6 +455,31 @@ where } } +impl MallocShallowSizeOf for rstd::collections::BTreeSet { + fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + if ops.has_malloc_enclosing_size_of() { + // See implementation for HashSet how this works. + self.iter().next().map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) + } else { + // An estimate. + self.len() * (size_of::() + size_of::()) + } + } +} + +impl MallocSizeOf for rstd::collections::BTreeSet +where + T: MallocSizeOf, +{ + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + let mut n = self.shallow_size_of(ops); + for k in self.iter() { + n += k.size_of(ops); + } + n + } +} + // PhantomData is always 0. 
impl MallocSizeOf for rstd::marker::PhantomData { fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { @@ -676,6 +701,7 @@ malloc_size_of_is_0!(std::time::Duration); mod tests { use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; use smallvec::SmallVec; + use std::collections::BTreeSet; use std::mem; impl_smallvec!(3); @@ -727,4 +753,14 @@ mod tests { let expected_min_allocs = mem::size_of::() * 4 + "ÖWL".len() + "COW".len() + "PIG".len() + "DUCK".len(); assert!(v.size_of(&mut ops) >= expected_min_allocs); } + + #[test] + fn btree_set() { + let mut set = BTreeSet::new(); + for t in 0..100 { + set.insert(vec![t]); + } + // ~36 per value + assert!(crate::malloc_size(&set) > 3000); + } } From 4986eb58f2740859a2f37589f154ab7035510105 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Feb 2020 13:19:22 +0100 Subject: [PATCH 075/359] kvdb-rocksdb: fix iter_from_prefix being slow (#326) --- kvdb-rocksdb/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index d478a0c6d..40ee0f595 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -616,7 +616,7 @@ impl Database { // We're not using "Prefix Seek" mode, so the iterator will return // keys not starting with the given prefix as well, // see https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes - optional.into_iter().flat_map(identity).filter(move |(k, _)| k.starts_with(prefix)) + optional.into_iter().flat_map(identity).take_while(move |(k, _)| k.starts_with(prefix)) } /// Close the database From 3377cd4a72b64987155f80f71e13a4b76e439184 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Feb 2020 13:39:22 +0100 Subject: [PATCH 076/359] kvdb-rocksdb: release 0.4.2 (#327) * kvdb-rocksdb: fix iter_from_prefix being slow * kvdb-rocksdb: update the CHANGELOG * kvdb-rocksdb: bump version to 0.4.2 --- kvdb-rocksdb/CHANGELOG.md | 7 +++++++ kvdb-rocksdb/Cargo.toml | 2 +- 2 files changed, 
8 insertions(+), 1 deletion(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index f2754de3b..f7cd5a5e9 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,13 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.2] - 2019-02-04 +### Fixes +- Fixed `iter_from_prefix` being slow. [#326](https://github.com/paritytech/parity-common/pull/326) + +## [0.4.1] - 2019-01-06 +- Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) + ## [0.4.0] - 2019-01-03 - Add I/O statistics for RocksDB. [#294](https://github.com/paritytech/parity-common/pull/294) - Support querying memory footprint via `MallocSizeOf` trait. [#292](https://github.com/paritytech/parity-common/pull/292) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index d7cef5999..797203ffe 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.4.1" +version = "0.4.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" From 06ae74e64aa61633a67afb5c16ed943ffd4d9542 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Feb 2020 14:27:33 +0100 Subject: [PATCH 077/359] remove libc feature from fixed-hash (#317) * fixed-hash: rename impl_libc to impl_cmp * fixed-hash: add a cmp bench * fied-hash: try cmp without libc * cargo fmt * update CHANGELOGs * fixed-hash: remove unused type in the bench --- ethbloom/CHANGELOG.md | 3 +- ethbloom/Cargo.toml | 3 +- fixed-hash/CHANGELOG.md | 5 +- fixed-hash/Cargo.toml | 10 ++-- fixed-hash/benches/cmp.rs | 98 ++++++++++++++++++++++++++++++++++++ fixed-hash/src/hash.rs | 58 +-------------------- primitive-types/CHANGELOG.md | 1 + primitive-types/Cargo.toml | 1 - 8 files changed, 113 insertions(+), 66 deletions(-) create mode 100644 fixed-hash/benches/cmp.rs diff --git 
a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index f1da71a00..1edf138b9 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -1,10 +1,11 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) ## [0.8.1] - 2019-10-24 ### Dependencies diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 952f043c3..f9f9960fb 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -22,10 +22,9 @@ rand = "0.7.2" hex-literal = "0.2.1" [features] -default = ["std", "serialize", "libc", "rustc-hex"] +default = ["std", "serialize", "rustc-hex"] std = ["fixed-hash/std", "crunchy/std"] serialize = ["std", "impl-serde"] -libc = ["fixed-hash/libc"] rustc-hex = ["fixed-hash/rustc-hex"] [[bench]] diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index ae22eee7f..adf179a5e 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -1,14 +1,15 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Removed `libc` feature. 
[#317](https://github.com/paritytech/parity-common/pull/317) ## [0.5.2] - 2019-12-19 ### Fixed -- re-export `alloc` for both std and no-std to fix compilation (See [PR #268](https://github.com/paritytech/parity-common/pull/268)) +- re-export `alloc` for both std and no-std to fix compilation (See [PR #268](https://github.com/paritytech/parity-common/pull/268)) ## [0.5.1] - 2019-10-24 ### Dependencies diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index cc3d3f3a8..c12d35c57 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -22,12 +22,14 @@ static_assertions = "1.0.0" [dev-dependencies] rand_xorshift = "0.2.0" - -[target.'cfg(not(target_os = "unknown"))'.dependencies] -libc = { version = "0.2.65", optional = true, default-features = false } +criterion = "0.3.0" [features] -default = ["std", "libc", "rand", "rustc-hex", "byteorder"] +default = ["std", "rand", "rustc-hex", "byteorder"] std = ["rustc-hex/std", "rand/std", "byteorder/std"] api-dummy = [] # Feature used by docs.rs to display documentation of hash types + +[[bench]] +name = "cmp" +harness = false diff --git a/fixed-hash/benches/cmp.rs b/fixed-hash/benches/cmp.rs new file mode 100644 index 000000000..fd8918006 --- /dev/null +++ b/fixed-hash/benches/cmp.rs @@ -0,0 +1,98 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Benchmarks for fixed-hash cmp implementation. + +use criterion::{black_box, Criterion, ParameterizedBenchmark}; +use criterion::{criterion_group, criterion_main}; + +use fixed_hash::construct_fixed_hash; + +construct_fixed_hash! 
{ pub struct H256(32); } + +criterion_group!(cmp, eq_equal, eq_nonequal, compare,); +criterion_main!(cmp); + +fn eq_equal(c: &mut Criterion) { + c.bench( + "eq_equal", + ParameterizedBenchmark::new( + "", + |b, x| b.iter(|| black_box(x.eq(black_box(x)))), + vec![ + H256::zero(), + H256::repeat_byte(0xAA), + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, + 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256([u8::max_value(); 32]), + ], + ), + ); +} + +fn eq_nonequal(c: &mut Criterion) { + c.bench( + "eq_nonequal", + ParameterizedBenchmark::new( + "", + |b, (x, y)| b.iter(|| black_box(x.eq(black_box(y)))), + vec![ + ( + H256::zero(), + H256::from([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + ]), + ), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), + ( + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, + 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256::from([ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, + 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + ), + ], + ), + ); +} + +fn compare(c: &mut Criterion) { + c.bench( + "compare", + ParameterizedBenchmark::new( + "", + |b, (x, y)| b.iter(|| black_box(x.cmp(black_box(y)))), + vec![ + ( + H256::zero(), + H256::from([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + ]), + ), + (H256::zero(), H256::zero()), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xAA)), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), + ( + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, + 0x40, 0x84, 0xC2, 0xDF, 0x36, 0xE0, 
0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, + 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + ), + ], + ), + ); +} diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 09ce43d8c..46f5c7748 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -313,7 +313,7 @@ macro_rules! construct_fixed_hash { impl_byteorder_for_fixed_hash!($name); impl_rand_for_fixed_hash!($name); - impl_libc_for_fixed_hash!($name); + impl_cmp_for_fixed_hash!($name); impl_rustc_hex_for_fixed_hash!($name); impl_quickcheck_for_fixed_hash!($name); } @@ -527,17 +527,9 @@ macro_rules! impl_rand_for_fixed_hash { }; } -// Implementation for disabled libc crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `libc` crate feature in -// a user crate. -#[cfg(not(all(feature = "libc", not(target_os = "unknown"))))] #[macro_export] #[doc(hidden)] -macro_rules! impl_libc_for_fixed_hash { +macro_rules! impl_cmp_for_fixed_hash { ( $name:ident ) => { impl $crate::core_::cmp::PartialEq for $name { #[inline] @@ -555,52 +547,6 @@ macro_rules! impl_libc_for_fixed_hash { }; } -// Implementation for enabled libc crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `libc` crate feature in -// a user crate. -#[cfg(all(feature = "libc", not(target_os = "unknown")))] -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_libc_for_fixed_hash { - ( $name:ident ) => { - impl $crate::core_::cmp::PartialEq for $name { - #[inline] - fn eq(&self, other: &Self) -> bool { - unsafe { - $crate::libc::memcmp( - self.as_ptr() as *const $crate::libc::c_void, - other.as_ptr() as *const $crate::libc::c_void, - Self::len_bytes(), - ) == 0 - } - } - } - - impl $crate::core_::cmp::Ord for $name { - fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { - let r = unsafe { - $crate::libc::memcmp( - self.as_ptr() as *const $crate::libc::c_void, - other.as_ptr() as *const $crate::libc::c_void, - Self::len_bytes(), - ) - }; - if r < 0 { - return $crate::core_::cmp::Ordering::Less; - } - if r > 0 { - return $crate::core_::cmp::Ordering::Greater; - } - $crate::core_::cmp::Ordering::Equal - } - } - }; -} - // Implementation for disabled rustc-hex crate support. // // # Note diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 12b958c30..87ee03e6f 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) ## [0.6.2] - 2019-01-03 - Expose to_hex and from_hex from impl-serde. 
[#302](https://github.com/paritytech/parity-common/pull/302) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 31ef6235c..3131a001f 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -18,7 +18,6 @@ impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, opti default = ["std"] std = ["uint/std", "fixed-hash/std", "impl-codec/std"] byteorder = ["fixed-hash/byteorder"] -libc = ["fixed-hash/libc"] rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde"] codec = ["impl-codec"] From 2f73b64efce7554b3eda682f90f68595db139d91 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 4 Feb 2020 06:05:22 -0800 Subject: [PATCH 078/359] bump parity-util-mem to 0.4.2 (#328) --- parity-util-mem/CHANGELOG.md | 4 ++++ parity-util-mem/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 8a24ebce3..f9c5dc482 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.2] - 2020-02-04 +- Implementation of `MallocSizeOf` for BTreeSet.[#325](https://github.com/paritytech/parity-common/pull/325) +- Split off implementation of `MallocSizeOf` for primitive-types. [#323](https://github.com/paritytech/parity-common/pull/323) + ## [0.4.1] - 2020-01-06 - Implementation of `MallocSizeOf` for SmallVec no longer requires ethereum `ethereum-impls` feature. 
[#307](https://github.com/paritytech/parity-common/pull/307) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index f38ea6ac5..7cabe525b 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.4.1" +version = "0.4.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" From a8329435e19d46b3e50145a9b5a399bfac52a31f Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Feb 2020 19:12:56 +0100 Subject: [PATCH 079/359] update changelogs (#329) --- kvdb-memorydb/CHANGELOG.md | 3 +++ kvdb-web/CHANGELOG.md | 8 +++++++- kvdb/CHANGELOG.md | 3 +++ parity-util-mem/CHANGELOG.md | 4 ++-- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index a76dd644f..50bfeda77 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.3.1] - 2019-01-06 +- Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) + ## [0.3.0] - 2019-01-03 - InMemory key-value database now can report memory used (via `MallocSizeOf`). [#292](https://github.com/paritytech/parity-common/pull/292) diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 475d4f564..df4565ccf 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -1,11 +1,17 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +## [0.3.1] - 2019-01-06 +- Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) + +## [0.3.0] - 2019-01-04 +- Updated to new `kvdb` and `parity-util-mem` versions. 
[#299](https://github.com/paritytech/parity-common/pull/299) + ## [0.2.0] - 2019-12-19 ### Changed - Default column support removed from the API diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index f6b634d76..4fcc88e95 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.3.1] - 2019-01-06 +- Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) + ## [0.3.0] - 2020-01-03 - I/O statistics API. [#294](https://github.com/paritytech/parity-common/pull/294) - Removed `KeyValueDBHandler` trait. [#304](https://github.com/paritytech/parity-common/pull/304) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index f9c5dc482..595632d79 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -7,8 +7,8 @@ The format is based on [Keep a Changelog]. ## [Unreleased] ## [0.4.2] - 2020-02-04 -- Implementation of `MallocSizeOf` for BTreeSet.[#325](https://github.com/paritytech/parity-common/pull/325) -- Split off implementation of `MallocSizeOf` for primitive-types. [#323](https://github.com/paritytech/parity-common/pull/323) +- Implementation of `MallocSizeOf` for `BTreeSet`. [#325](https://github.com/paritytech/parity-common/pull/325) +- Split off implementation of `MallocSizeOf` for `primitive-types`. [#323](https://github.com/paritytech/parity-common/pull/323) ## [0.4.1] - 2020-01-06 - Implementation of `MallocSizeOf` for SmallVec no longer requires ethereum `ethereum-impls` feature. [#307](https://github.com/paritytech/parity-common/pull/307) From 996d3703489b574946ff22b513d6f15f1d747808 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 6 Feb 2020 01:15:17 -0800 Subject: [PATCH 080/359] Remove libc completely (#333) It is no longer used anywhere, right? 
--- fixed-hash/src/lib.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index a3e7af6f7..027ba2284 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -18,10 +18,6 @@ pub extern crate alloc as alloc_; #[doc(hidden)] pub use core as core_; -#[cfg(all(feature = "libc", not(target_os = "unknown")))] -#[doc(hidden)] -pub use libc; - // This disables a warning for unused #[macro_use(..)] // which is incorrect since the compiler does not check // for all available configurations. @@ -38,10 +34,6 @@ pub use static_assertions::const_assert; #[doc(hidden)] pub use byteorder; -#[cfg(not(feature = "libc"))] -#[doc(hidden)] -pub mod libc {} - #[cfg(feature = "rustc-hex")] #[doc(hidden)] pub use rustc_hex; From 27e218f86a89f43ec6f6837c3235a7d75878fa57 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 6 Feb 2020 13:32:22 +0100 Subject: [PATCH 081/359] Bump parking_lot to 0.10 and minor versions (#332) * bump versions * update changelog * change bump to 0.5.0 * update deps * major bump * also kvdb-web * also shared tests to 0.2 --- kvdb-memorydb/CHANGELOG.md | 3 +++ kvdb-memorydb/Cargo.toml | 10 +++++----- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 10 +++++----- kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb-web/CHANGELOG.md | 3 +++ kvdb-web/Cargo.toml | 10 +++++----- kvdb/CHANGELOG.md | 3 +++ kvdb/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 3 +++ parity-util-mem/Cargo.toml | 4 ++-- 11 files changed, 36 insertions(+), 21 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 50bfeda77..f2f80a709 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.0] - 2019-02-05 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) + ## [0.3.1] - 2019-01-06 - Updated features and feature dependencies. 
[#307](https://github.com/paritytech/parity-common/pull/307) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 2ba57bbc4..954a1e117 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.3.1" +version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,9 +8,9 @@ license = "GPL-3.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false, features = ["std"] } -parking_lot = "0.9.0" -kvdb = { version = "0.3", path = "../kvdb" } +parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false, features = ["std"] } +parking_lot = "0.10.0" +kvdb = { version = "0.4", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index f7cd5a5e9..10710343c 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.5.0] - 2019-02-05 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332 + ## [0.4.2] - 2019-02-04 ### Fixes - Fixed `iter_from_prefix` being slow. 
[#326](https://github.com/paritytech/parity-common/pull/326) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 797203ffe..2f67b8d99 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.4.2" +version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -15,19 +15,19 @@ harness = false smallvec = "1.0.0" fs-swap = "0.2.4" interleaved-ordered = "0.1.1" -kvdb = { path = "../kvdb", version = "0.3" } +kvdb = { path = "../kvdb", version = "0.4" } log = "0.4.8" num_cpus = "1.10.1" -parking_lot = "0.9.0" +parking_lot = "0.10.0" regex = "1.3.1" rocksdb = { version = "0.13", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } rand = "0.7.2" tempdir = "0.3.7" diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 1b2158c3b..91259e851 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "kvdb-shared-tests" -version = "0.1.0" +version = "0.2.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" license = "GPL-3.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.3" } +kvdb = { path = "../kvdb", version = "0.4" } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 
df4565ccf..69d10f44a 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.0] - 2019-02-05 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) + ## [0.3.1] - 2019-01-06 - Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index ea77e5855..a686f6b26 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.3.1" +version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.54" js-sys = "0.3.31" -kvdb = { version = "0.3", path = "../kvdb" } -kvdb-memorydb = { version = "0.3", path = "../kvdb-memorydb" } +kvdb = { version = "0.4", path = "../kvdb" } +kvdb-memorydb = { version = "0.4", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false } [dependencies.web-sys] version = "0.3.31" @@ -39,6 +39,6 @@ features = [ [dev-dependencies] console_log = "0.1.2" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.1" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } wasm-bindgen-test = "0.3.4" wasm-bindgen-futures = "0.4.4" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 4fcc88e95..e202aaa7a 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.0] - 2019-01-06 +- Bump parking_lot to 0.10. 
[#332](https://github.com/paritytech/parity-common/pull/332) + ## [0.3.1] - 2019-01-06 - Updated features and feature dependencies. [#307](https://github.com/paritytech/parity-common/pull/307) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index c176644ac..e6d738877 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.3.1" +version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -10,4 +10,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } -parity-util-mem = { path = "../parity-util-mem", version = "0.4", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 595632d79..b6f2dba48 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.5.0] - 2019-02-05 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332 + ## [0.4.2] - 2020-02-04 - Implementation of `MallocSizeOf` for `BTreeSet`. [#325](https://github.com/paritytech/parity-common/pull/325) - Split off implementation of `MallocSizeOf` for `primitive-types`. 
[#323](https://github.com/paritytech/parity-common/pull/323) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 7cabe525b..02e0e222a 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.4.2" +version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -25,7 +25,7 @@ impl-trait-for-tuples = "0.1.3" smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } -parking_lot = { version = "0.9.0", optional = true } +parking_lot = { version = "0.10.0", optional = true } primitive-types = { version = "0.6", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] From 2c5273c2e983aa6398cd11d3af65c3d3cf7e6851 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 6 Feb 2020 15:26:54 +0100 Subject: [PATCH 082/359] [parity-crypto] Use upstream secp256k1 (#258) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use upstream secp256k1 – WIP * Use upstream 0.16 * Patch to use upstream @ 0.17 * Refactor the Generate trait * Sort out todos: we do need the context sometimes, we do use both verify and signing * Elaborate on todo * Fix vrs conversion test * Fix more tests * Fix more tests * Use new API to do what we did in C before (`ecdh_hash_function_raw()`) * Add a test for agree() to check we're agreeing with previous impl * Add todos and use new `inv_assign()` * Use wip-branch of parity fork * Sort out a few todos, add some more * Use "thin fork" of upstream secp256k1 * Relax version constraints. 
Something somewhere in the `eth` dependency graph requires `= 0.8.0` * Remove [patch] * Remove `inv()` from `SecretKey` * Clean up, resolve todos * Resolve todo * Apply stable rustfmt * Relax version requirements (srsly though, wtf?) * More info in the CHANGELOG --- parity-crypto/CHANGELOG.md | 6 ++ parity-crypto/Cargo.toml | 8 +- parity-crypto/src/publickey/ec_math_utils.rs | 42 ++++------ parity-crypto/src/publickey/ecdh.rs | 26 +++++-- .../src/publickey/ecdsa_signature.rs | 53 ++++++------- parity-crypto/src/publickey/ecies.rs | 4 +- parity-crypto/src/publickey/error.rs | 2 +- parity-crypto/src/publickey/extended_keys.rs | 32 ++++---- parity-crypto/src/publickey/keypair.rs | 13 ++-- .../src/publickey/keypair_generator.rs | 22 +----- parity-crypto/src/publickey/mod.rs | 13 ++-- parity-crypto/src/publickey/secret_key.rs | 76 ++++++++----------- 12 files changed, 138 insertions(+), 159 deletions(-) diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 927c9dc9c..c915ee3c9 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,3 +5,9 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Remove `inv()` from `SecretKey` (breaking) +- `Generate::generate()` does not return error +- `Secp256k1` is no longer exported +- Remove `public_is_valid()` as it is now impossible to create invalid public keys +- 0-valued `Secp::Message`s are disallowed (signatures on them are forgeable for all keys) +- updates to upstream `rust-secp256k1` at v0.17.2 diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index d3e7b1994..5a7ea3b9b 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -16,13 +16,13 @@ required-features = ["publickey"] [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } scrypt = { version = "0.2.0", default-features = false } -parity-secp256k1 = { version = "0.7.0", optional = true } +secp256k1 = { version = "0.17.2", optional = true, features = ["recovery", "rand-std"] } ethereum-types = { version = "0.8.0", optional = true } lazy_static = { version = "1.0", optional = true } ripemd160 = "0.8.0" sha2 = "0.8.0" -digest = "0.8.1" -hmac = "0.7.1" +digest = "0.8" +hmac = "0.7" aes = "0.3.2" aes-ctr = "0.3.0" block-modes = "0.3.3" @@ -40,4 +40,4 @@ hex-literal = "0.2.1" default = [] # public key crypto utils # moved from ethkey module in parity ethereum repository -publickey = ["parity-secp256k1", "lazy_static", "ethereum-types"] +publickey = ["secp256k1", "lazy_static", "ethereum-types"] diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs index 1aa55db7e..bd8653b79 100644 --- a/parity-crypto/src/publickey/ec_math_utils.rs +++ b/parity-crypto/src/publickey/ec_math_utils.rs @@ -37,25 +37,20 @@ lazy_static! { pub static ref CURVE_ORDER: U256 = H256::from_slice(&SECP256K1_CURVE_ORDER).into_uint(); } -/// Whether the public key is valid. 
-pub fn public_is_valid(public: &Public) -> bool { - to_secp256k1_public(public).ok().map_or(false, |p| p.is_valid()) -} - /// In-place multiply public key by secret key (EC point * scalar) pub fn public_mul_secret(public: &mut Public, secret: &Secret) -> Result<(), Error> { let key_secret = secret.to_secp256k1_secret()?; let mut key_public = to_secp256k1_public(public)?; - key_public.mul_assign(&SECP256K1, &key_secret)?; + key_public.mul_assign(&SECP256K1, &key_secret[..])?; set_public(public, &key_public); Ok(()) } /// In-place add one public key to another (EC point + EC point) pub fn public_add(public: &mut Public, other: &Public) -> Result<(), Error> { - let mut key_public = to_secp256k1_public(public)?; + let key_public = to_secp256k1_public(public)?; let other_public = to_secp256k1_public(other)?; - key_public.add_assign(&SECP256K1, &other_public)?; + let key_public = key_public.combine(&other_public)?; set_public(public, &key_public); Ok(()) } @@ -63,10 +58,10 @@ pub fn public_add(public: &mut Public, other: &Public) -> Result<(), Error> { /// In-place sub one public key from another (EC point - EC point) pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> { let mut key_neg_other = to_secp256k1_public(other)?; - key_neg_other.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + key_neg_other.mul_assign(&SECP256K1, super::MINUS_ONE_KEY)?; let mut key_public = to_secp256k1_public(public)?; - key_public.add_assign(&SECP256K1, &key_neg_other)?; + key_public = key_public.combine(&key_neg_other)?; set_public(public, &key_public); Ok(()) } @@ -74,15 +69,14 @@ pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> { /// Replace a public key with its additive inverse (EC point = - EC point) pub fn public_negate(public: &mut Public) -> Result<(), Error> { let mut key_public = to_secp256k1_public(public)?; - key_public.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + key_public.mul_assign(&SECP256K1, super::MINUS_ONE_KEY)?; 
set_public(public, &key_public); Ok(()) } /// Return the generation point (aka base point) of secp256k1 pub fn generation_point() -> Public { - let public_key = - key::PublicKey::from_slice(&SECP256K1, &BASE_POINT_BYTES).expect("constructed using constants; qed"); + let public_key = key::PublicKey::from_slice(&BASE_POINT_BYTES).expect("constructed using constants; qed"); let mut public = Public::default(); set_public(&mut public, &public_key); public @@ -95,24 +89,24 @@ fn to_secp256k1_public(public: &Public) -> Result { temp }; - Ok(key::PublicKey::from_slice(&SECP256K1, &public_data)?) + Ok(key::PublicKey::from_slice(&public_data)?) } fn set_public(public: &mut Public, key_public: &key::PublicKey) { - let key_public_serialized = key_public.serialize_vec(&SECP256K1, false); + let key_public_serialized = key_public.serialize_uncompressed(); public.as_bytes_mut().copy_from_slice(&key_public_serialized[1..65]); } #[cfg(test)] mod tests { use super::super::{Generator, Random, Secret}; - use super::{generation_point, public_add, public_is_valid, public_mul_secret, public_negate, public_sub}; + use super::{generation_point, public_add, public_mul_secret, public_negate, public_sub}; use std::str::FromStr; #[test] fn public_addition_is_commutative() { - let public1 = Random.generate().unwrap().public().clone(); - let public2 = Random.generate().unwrap().public().clone(); + let public1 = Random.generate().public().clone(); + let public2 = Random.generate().public().clone(); let mut left = public1.clone(); public_add(&mut left, &public2).unwrap(); @@ -125,8 +119,8 @@ mod tests { #[test] fn public_addition_is_reversible_with_subtraction() { - let public1 = Random.generate().unwrap().public().clone(); - let public2 = Random.generate().unwrap().public().clone(); + let public1 = Random.generate().public().clone(); + let public2 = Random.generate().public().clone(); let mut sum = public1.clone(); public_add(&mut sum, &public2).unwrap(); @@ -137,7 +131,7 @@ mod tests { #[test] fn 
public_negation_is_involutory() { - let public = Random.generate().unwrap().public().clone(); + let public = Random.generate().public().clone(); let mut negation = public.clone(); public_negate(&mut negation).unwrap(); public_negate(&mut negation).unwrap(); @@ -145,12 +139,6 @@ mod tests { assert_eq!(negation, public); } - #[test] - fn known_public_is_valid() { - let public = Random.generate().unwrap().public().clone(); - assert!(public_is_valid(&public)); - } - #[test] fn generation_point_expected() { let point = generation_point(); diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs index ab22c2a09..8cdaf793a 100644 --- a/parity-crypto/src/publickey/ecdh.rs +++ b/parity-crypto/src/publickey/ecdh.rs @@ -16,21 +16,37 @@ //! ECDH key agreement scheme implemented as a free function. -use super::{Error, Public, Secret, SECP256K1}; +use super::{Error, Public, Secret}; use secp256k1::{self, ecdh, key}; /// Agree on a shared secret pub fn agree(secret: &Secret, public: &Public) -> Result { - let context = &SECP256K1; let pdata = { let mut temp = [4u8; 65]; (&mut temp[1..65]).copy_from_slice(&public[0..64]); temp }; - let publ = key::PublicKey::from_slice(context, &pdata)?; - let sec = key::SecretKey::from_slice(context, secret.as_bytes())?; - let shared = ecdh::SharedSecret::new_raw(context, &publ, &sec); + let publ = key::PublicKey::from_slice(&pdata)?; + let sec = key::SecretKey::from_slice(secret.as_bytes())?; + let shared = ecdh::SharedSecret::new_with_hash(&publ, &sec, |x, _| x.into())?; Secret::import_key(&shared[0..32]).map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) } + +#[cfg(test)] +mod tests { + use super::{agree, Public, Secret}; + use std::str::FromStr; + + #[test] + fn test_agree() { + // Just some random values for secret/public to check we agree with previous implementation. 
+ let secret = Secret::from_str("01a400760945613ff6a46383b250bf27493bfe679f05274916182776f09b28f1").unwrap(); + let public= Public::from_str("e37f3cbb0d0601dc930b8d8aa56910dd5629f2a0979cc742418960573efc5c0ff96bc87f104337d8c6ab37e597d4f9ffbd57302bc98a825519f691b378ce13f5").unwrap(); + let shared = agree(&secret, &public); + + assert!(shared.is_ok()); + assert_eq!(shared.unwrap().to_hex(), "28ab6fad6afd854ff27162e0006c3f6bd2daafc0816c85b5dfb05dbb865fa6ac",); + } +} diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs index 6801adf10..ae245cffb 100644 --- a/parity-crypto/src/publickey/ecdsa_signature.rs +++ b/parity-crypto/src/publickey/ecdsa_signature.rs @@ -20,7 +20,10 @@ use super::{public_to_address, Address, Error, Message, Public, Secret, SECP256K use ethereum_types::{H256, H520}; use rustc_hex::{FromHex, ToHex}; use secp256k1::key::{PublicKey, SecretKey}; -use secp256k1::{Error as SecpError, Message as SecpMessage, RecoverableSignature, RecoveryId}; +use secp256k1::{ + recovery::{RecoverableSignature, RecoveryId}, + Error as SecpError, Message as SecpMessage, +}; use std::cmp::PartialEq; use std::fmt; use std::hash::{Hash, Hasher}; @@ -208,12 +211,12 @@ impl DerefMut for Signature { } /// Signs message with the given secret key. -/// Returns the corresponding signature +/// Returns the corresponding signature. 
pub fn sign(secret: &Secret, message: &Message) -> Result { let context = &SECP256K1; - let sec = SecretKey::from_slice(context, secret.as_ref())?; - let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec)?; - let (rec_id, data) = s.serialize_compact(context); + let sec = SecretKey::from_slice(secret.as_ref())?; + let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec); + let (rec_id, data) = s.serialize_compact(); let mut data_arr = [0; 65]; // no need to check if s is low, it always is @@ -225,9 +228,8 @@ pub fn sign(secret: &Secret, message: &Message) -> Result { /// Performs verification of the signature for the given message with corresponding public key pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result { let context = &SECP256K1; - let rsig = - RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let sig = rsig.to_standard(context); + let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let sig = rsig.to_standard(); let pdata: [u8; 65] = { let mut temp = [4u8; 65]; @@ -235,7 +237,7 @@ pub fn verify_public(public: &Public, signature: &Signature, message: &Message) temp }; - let publ = PublicKey::from_slice(context, &pdata)?; + let publ = PublicKey::from_slice(&pdata)?; match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) { Ok(_) => Ok(true), Err(SecpError::IncorrectSignature) => Ok(false), @@ -253,10 +255,9 @@ pub fn verify_address(address: &Address, signature: &Signature, message: &Messag /// Recovers the public key from the signature for the message pub fn recover(signature: &Signature, message: &Message) -> Result { let context = &SECP256K1; - let rsig = - RecoverableSignature::from_compact(context, &signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let rsig = RecoverableSignature::from_compact(&signature[0..64], 
RecoveryId::from_i32(signature[64] as i32)?)?; let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; - let serialized = pubkey.serialize_vec(context, false); + let serialized = pubkey.serialize_uncompressed(); let mut public = Public::default(); public.as_bytes_mut().copy_from_slice(&serialized[1..65]); @@ -272,9 +273,9 @@ mod tests { #[test] fn vrs_conversion() { // given - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); + let keypair = Random.generate(); + let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); + let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); // when let vrs = signature.clone().into_electrum(); @@ -286,9 +287,9 @@ mod tests { #[test] fn signature_to_and_from_str() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); + let keypair = Random.generate(); + let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); + let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); let string = format!("{}", signature); let deserialized = Signature::from_str(&string).unwrap(); assert_eq!(signature, deserialized); @@ -296,25 +297,25 @@ mod tests { #[test] fn sign_and_recover_public() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); + let keypair = Random.generate(); + let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let signature = sign(keypair.secret(), &message).unwrap(); assert_eq!(keypair.public(), &recover(&signature, &message).unwrap()); } #[test] fn sign_and_verify_public() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = 
sign(keypair.secret(), &message).unwrap(); + let keypair = Random.generate(); + let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); + let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); assert!(verify_public(keypair.public(), &signature, &message).unwrap()); } #[test] fn sign_and_verify_address() { - let keypair = Random.generate().unwrap(); - let message = Message::default(); - let signature = sign(keypair.secret(), &message).unwrap(); + let keypair = Random.generate(); + let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); + let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); assert!(verify_address(&keypair.address(), &signature, &message).unwrap()); } } diff --git a/parity-crypto/src/publickey/ecies.rs b/parity-crypto/src/publickey/ecies.rs index 7b963c138..d7c1354f3 100644 --- a/parity-crypto/src/publickey/ecies.rs +++ b/parity-crypto/src/publickey/ecies.rs @@ -27,7 +27,7 @@ const ENC_VERSION: u8 = 0x04; /// /// Authenticated data may be empty. pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result, Error> { - let r = Random.generate()?; + let r = Random.generate(); let z = ecdh::agree(r.secret(), public)?; let mut key = [0u8; 32]; kdf(&z, &[0u8; 0], &mut key); @@ -122,7 +122,7 @@ mod tests { #[test] fn ecies_shared() { - let kp = Random.generate().unwrap(); + let kp = Random.generate(); let message = b"So many books, so little time"; let shared = b"shared"; diff --git a/parity-crypto/src/publickey/error.rs b/parity-crypto/src/publickey/error.rs index 322be394b..7f9bfb8e0 100644 --- a/parity-crypto/src/publickey/error.rs +++ b/parity-crypto/src/publickey/error.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -//! Module specific errors +//! Module specific errors. 
use crate::error::SymmError; use std::{error::Error as StdError, fmt, result}; diff --git a/parity-crypto/src/publickey/extended_keys.rs b/parity-crypto/src/publickey/extended_keys.rs index 40b33640e..44307fdf2 100644 --- a/parity-crypto/src/publickey/extended_keys.rs +++ b/parity-crypto/src/publickey/extended_keys.rs @@ -84,7 +84,7 @@ pub struct ExtendedSecret { impl ExtendedSecret { /// New extended key from given secret and chain code. pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret { - ExtendedSecret { secret: secret, chain_code: chain_code } + ExtendedSecret { secret, chain_code } } /// New extended key from given secret with the random chain code. @@ -93,7 +93,7 @@ impl ExtendedSecret { } /// New extended key from given secret. - /// Chain code will be derived from the secret itself (in a deterministic way). + /// Chain code will be derived from the secret itself (deterministically). pub fn new(secret: Secret) -> ExtendedSecret { let chain_code = derivation::chain_code(*secret); ExtendedSecret::with_code(secret, chain_code) @@ -263,10 +263,9 @@ mod derivation { let mut data = vec![0u8; 33 + T::len()]; let sec_private = - SecretKey::from_slice(&SECP256K1, private_key.as_bytes()).expect("Caller should provide valid private key"); - let sec_public = - PublicKey::from_secret_key(&SECP256K1, &sec_private).expect("Caller should provide valid private key"); - let public_serialized = sec_public.serialize_vec(&SECP256K1, true); + SecretKey::from_slice(private_key.as_bytes()).expect("Caller should provide valid private key"); + let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private); + let public_serialized = sec_public.serialize(); // curve point (compressed public key) -- index // 0.33 -- 33..end @@ -319,8 +318,8 @@ mod derivation { let mut public_sec_raw = [0u8; 65]; public_sec_raw[0] = 4; public_sec_raw[1..65].copy_from_slice(public_key.as_bytes()); - let public_sec = PublicKey::from_slice(&SECP256K1, &public_sec_raw).map_err(|_| 
Error::InvalidPoint)?; - let public_serialized = public_sec.serialize_vec(&SECP256K1, true); + let public_sec = PublicKey::from_slice(&public_sec_raw).map_err(|_| Error::InvalidPoint)?; + let public_serialized = public_sec.serialize(); let mut data = vec![0u8; 33 + T::len()]; // curve point (compressed public key) -- index @@ -339,16 +338,15 @@ mod derivation { if *CURVE_ORDER <= new_private.into_uint() { return Err(Error::MissingIndex); } - let new_private_sec = SecretKey::from_slice(&SECP256K1, new_private.as_bytes()).expect( + let new_private_sec = SecretKey::from_slice(new_private.as_bytes()).expect( "Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed", ); - let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec) - .expect("Valid private key produces valid public key"); + let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec); // Adding two points on the elliptic curves (combining two public keys) - new_public.add_assign(&SECP256K1, &public_sec).expect("Addition of two valid points produce valid point"); + new_public = new_public.combine(&public_sec).expect("Addition of two valid points produce valid point"); - let serialized = new_public.serialize_vec(&SECP256K1, false); + let serialized = new_public.serialize_uncompressed(); Ok((H512::from_slice(&serialized[1..65]), new_chain_code)) } @@ -367,9 +365,9 @@ mod derivation { } pub fn point(secret: H256) -> Result { - let sec = SecretKey::from_slice(&SECP256K1, secret.as_bytes()).map_err(|_| Error::InvalidPoint)?; - let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec).map_err(|_| Error::InvalidPoint)?; - let serialized = public_sec.serialize_vec(&SECP256K1, false); + let sec = SecretKey::from_slice(secret.as_bytes()).map_err(|_| Error::InvalidPoint)?; + let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec); + let serialized = public_sec.serialize_uncompressed(); Ok(H512::from_slice(&serialized[1..65])) 
} @@ -490,7 +488,7 @@ mod tests { } #[test] - fn match_() { + fn test_key_derivation() { let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs index a9dc05cab..d4fa13b39 100644 --- a/parity-crypto/src/publickey/keypair.rs +++ b/parity-crypto/src/publickey/keypair.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -//! Key pair (public + secrect) description +//! Key pair (public + secret) description. use super::{Address, Error, Public, Secret, SECP256K1}; use crate::Keccak256; @@ -48,14 +48,14 @@ impl KeyPair { /// Create a pair from secret key pub fn from_secret(secret: Secret) -> Result { let context = &SECP256K1; - let s: key::SecretKey = key::SecretKey::from_slice(context, &secret[..])?; - let pub_key = key::PublicKey::from_secret_key(context, &s)?; - let serialized = pub_key.serialize_vec(context, false); + let s: key::SecretKey = key::SecretKey::from_slice(&secret[..])?; + let pub_key = key::PublicKey::from_secret_key(context, &s); + let serialized = pub_key.serialize_uncompressed(); let mut public = Public::default(); public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - let keypair = KeyPair { secret: secret, public: public }; + let keypair = KeyPair { secret, public }; Ok(keypair) } @@ -67,8 +67,7 @@ impl KeyPair { /// Copies a pair from another one pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self { - let context = &SECP256K1; - let serialized = publ.serialize_vec(context, false); + let serialized = publ.serialize_uncompressed(); let secret = Secret::from(sec); let mut public = Public::default(); 
public.as_bytes_mut().copy_from_slice(&serialized[1..65]); diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs index 2ae91db6e..3afd86a97 100644 --- a/parity-crypto/src/publickey/keypair_generator.rs +++ b/parity-crypto/src/publickey/keypair_generator.rs @@ -17,29 +17,13 @@ //! Random key pair generator. Relies on the secp256k1 C-library to generate random data. use super::{Generator, KeyPair, SECP256K1}; -use rand::rngs::OsRng; -use std::convert::Infallible; /// Randomly generates new keypair, instantiating the RNG each time. pub struct Random; impl Generator for Random { - type Error = std::io::Error; - - fn generate(&mut self) -> Result { - match OsRng.generate() { - Ok(pair) => Ok(pair), - Err(void) => match void {}, // LLVM unreachable - } - } -} - -impl Generator for OsRng { - type Error = Infallible; - - fn generate(&mut self) -> Result { - let (sec, publ) = SECP256K1.generate_keypair(self).expect("context always created with full capabilities; qed"); - - Ok(KeyPair::from_keypair(sec, publ)) + fn generate(&mut self) -> KeyPair { + let (sec, publ) = SECP256K1.generate_keypair(&mut secp256k1::rand::thread_rng()); + KeyPair::from_keypair(sec, publ) } } diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index 12b07d176..8d487b5ad 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -28,7 +28,6 @@ pub mod ecdh; pub mod ecies; pub mod error; -pub use self::ec_math_utils::public_is_valid; pub use self::ecdsa_signature::{recover, sign, verify_address, verify_public, Signature}; pub use self::error::Error; pub use self::extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; @@ -42,14 +41,18 @@ use lazy_static::lazy_static; pub use ethereum_types::{Address, Public}; pub type Message = H256; +/// The number -1 encoded as a secret key +const MINUS_ONE_KEY: &'static [u8] = &[ + 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, + 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40, +]; + lazy_static! { - pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); + static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); } /// Generates new keypair. pub trait Generator { - type Error; - /// Should be called to generate new keypair. - fn generate(&mut self) -> Result; + fn generate(&mut self) -> KeyPair; } diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs index e721ec66b..68522ca29 100644 --- a/parity-crypto/src/publickey/secret_key.rs +++ b/parity-crypto/src/publickey/secret_key.rs @@ -14,18 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -//! Secret key implementation +//! Secret key implementation. -use super::{Error, SECP256K1}; -use ethereum_types::H256; -use secp256k1::constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE; -use secp256k1::key; use std::convert::TryFrom; use std::fmt; use std::ops::Deref; use std::str::FromStr; + +use ethereum_types::H256; +use secp256k1::constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE; +use secp256k1::key; use zeroize::Zeroize; +use crate::publickey::Error; + /// Represents secret key #[derive(Clone, PartialEq, Eq)] pub struct Secret { @@ -74,7 +76,7 @@ impl Secret { /// Imports and validates the key. 
pub fn import_key(key: &[u8]) -> Result { - let secret = key::SecretKey::from_slice(&super::SECP256K1, key)?; + let secret = key::SecretKey::from_slice(key)?; Ok(secret.into()) } @@ -99,7 +101,7 @@ impl Secret { (false, false) => { let mut key_secret = self.to_secp256k1_secret()?; let other_secret = other.to_secp256k1_secret()?; - key_secret.add_assign(&SECP256K1, &other_secret)?; + key_secret.add_assign(&other_secret[..])?; *self = key_secret.into(); Ok(()) @@ -118,8 +120,8 @@ impl Secret { (false, false) => { let mut key_secret = self.to_secp256k1_secret()?; let mut other_secret = other.to_secp256k1_secret()?; - other_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; - key_secret.add_assign(&SECP256K1, &other_secret)?; + other_secret.mul_assign(super::MINUS_ONE_KEY)?; + key_secret.add_assign(&other_secret[..])?; *self = key_secret.into(); Ok(()) @@ -131,12 +133,13 @@ impl Secret { pub fn dec(&mut self) -> Result<(), Error> { match self.is_zero() { true => { - *self = key::MINUS_ONE_KEY.into(); + *self = Secret::try_from(super::MINUS_ONE_KEY) + .expect("Constructing a secret key from a known-good constant works; qed."); Ok(()) } false => { let mut key_secret = self.to_secp256k1_secret()?; - key_secret.add_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + key_secret.add_assign(super::MINUS_ONE_KEY)?; *self = key_secret.into(); Ok(()) @@ -155,7 +158,7 @@ impl Secret { (false, false) => { let mut key_secret = self.to_secp256k1_secret()?; let other_secret = other.to_secp256k1_secret()?; - key_secret.mul_assign(&SECP256K1, &other_secret)?; + key_secret.mul_assign(&other_secret[..])?; *self = key_secret.into(); Ok(()) @@ -169,7 +172,7 @@ impl Secret { true => Ok(()), false => { let mut key_secret = self.to_secp256k1_secret()?; - key_secret.mul_assign(&SECP256K1, &key::MINUS_ONE_KEY)?; + key_secret.mul_assign(super::MINUS_ONE_KEY)?; *self = key_secret.into(); Ok(()) @@ -177,15 +180,6 @@ impl Secret { } } - /// Inplace inverse secret key (1 / scalar) - pub fn inv(&mut self) 
-> Result<(), Error> { - let mut key_secret = self.to_secp256k1_secret()?; - key_secret.inv_assign(&SECP256K1)?; - - *self = key_secret.into(); - Ok(()) - } - /// Compute power of secret key inplace (secret ^ pow). pub fn pow(&mut self, pow: usize) -> Result<(), Error> { if self.is_zero() { @@ -206,9 +200,9 @@ impl Secret { Ok(()) } - /// Create `secp256k1::key::SecretKey` based on this secret + /// Create a `secp256k1::key::SecretKey` based on this secret. pub fn to_secp256k1_secret(&self) -> Result { - Ok(key::SecretKey::from_slice(&SECP256K1, &self[..])?) + key::SecretKey::from_slice(&self[..]).map_err(Into::into) } } @@ -239,6 +233,17 @@ impl TryFrom<&str> for Secret { } } +impl TryFrom<&[u8]> for Secret { + type Error = Error; + + fn try_from(b: &[u8]) -> Result { + if b.len() != SECP256K1_SECRET_KEY_SIZE { + return Err(Error::InvalidSecretKey); + } + Ok(Self { inner: H256::from_slice(b) }) + } +} + impl From for Secret { fn from(key: key::SecretKey) -> Self { let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; @@ -261,30 +266,9 @@ mod tests { use super::Secret; use std::str::FromStr; - #[test] - fn multiplicating_secret_inversion_with_secret_gives_one() { - let secret = Random.generate().unwrap().secret().clone(); - let mut inversion = secret.clone(); - inversion.inv().unwrap(); - inversion.mul(&secret).unwrap(); - assert_eq!( - inversion, - Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap() - ); - } - - #[test] - fn secret_inversion_is_reversible_with_inversion() { - let secret = Random.generate().unwrap().secret().clone(); - let mut inversion = secret.clone(); - inversion.inv().unwrap(); - inversion.inv().unwrap(); - assert_eq!(inversion, secret); - } - #[test] fn secret_pow() { - let secret = Random.generate().unwrap().secret().clone(); + let secret = Random.generate().secret().clone(); let mut pow0 = secret.clone(); pow0.pow(0).unwrap(); From 11ad766065b8fb61e73233447f2b9a101d7ffff7 Mon Sep 17 00:00:00 2001 From: Nikolay 
Volf Date: Thu, 6 Feb 2020 22:10:40 +0100 Subject: [PATCH 083/359] Add different mode for malloc_size_of_is_0 macro dealing with generics (#334) * add another mode for malloc_size_of_is_0 macro dealing with generics * identations * add version bump and log * add doc comments * fix version --- parity-util-mem/CHANGELOG.md | 5 +- parity-util-mem/Cargo.toml | 2 +- parity-util-mem/src/malloc_size.rs | 92 +++++++++++++++++++++++------- 3 files changed, 77 insertions(+), 22 deletions(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index b6f2dba48..855ea42fe 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,8 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.5.1] - 2019-02-05 +- Add different mode for malloc_size_of_is_0 macro dealing with generics #334. [#332](https://github.com/paritytech/parity-common/pull/334) + ## [0.5.0] - 2019-02-05 -- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) ## [0.4.2] - 2020-02-04 - Implementation of `MallocSizeOf` for `BTreeSet`. [#325](https://github.com/paritytech/parity-common/pull/325) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 02e0e222a..d9024ea1c 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.5.0" +version = "0.5.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 864c94abd..fccdb9bcd 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -564,28 +564,68 @@ impl MallocSizeOf for parking_lot::RwLock { } } +/// Implement notion of 0 allocation size for some type(s). 
+/// +/// If used for generics, by default it will require that generic arguments +/// should implement `MallocSizeOf`. This can be avoided by passing "any: " +/// in front of the type list. +/// +/// ```rust +/// use parity_util_mem::{malloc_size, malloc_size_of_is_0}; +/// +/// struct Data

{ +/// phantom: std::marker::PhantomData

, +/// } +/// +/// malloc_size_of_is_0!(any: Data

); +/// +/// // MallocSizeOf is NOT implemented for [u8; 333] +/// assert_eq!(malloc_size(&Data::<[u8; 333]> { phantom: std::marker::PhantomData }), 0); +/// ``` +/// +/// and when no "any: " +/// +/// use parity_util_mem::{malloc_size, malloc_size_of_is_0}; +/// +/// struct Data { pub T } +/// +/// // generic argument (`T`) must be `impl MallocSizeOf` +/// malloc_size_of_is_0!(Data); +/// +/// assert_eq!(malloc_size(&Data(0u8), 0); +/// ``` #[macro_export] macro_rules! malloc_size_of_is_0( - ($($ty:ty),+) => ( - $( - impl $crate::MallocSizeOf for $ty { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - } - )+ - ); - ($($ty:ident<$($gen:ident),+>),+) => ( - $( - impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - } - )+ - ); + ($($ty:ty),+) => ( + $( + impl $crate::MallocSizeOf for $ty { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + } + )+ + ); + (any: $($ty:ident<$($gen:ident),+>),+) => ( + $( + impl<$($gen),+> $crate::MallocSizeOf for $ty<$($gen),+> { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + } + )+ + ); + ($($ty:ident<$($gen:ident),+>),+) => ( + $( + impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + } + )+ + ); ); malloc_size_of_is_0!(bool, char, str); @@ -763,4 +803,16 @@ mod tests { // ~36 per value assert!(crate::malloc_size(&set) > 3000); } + + #[test] + fn special_malloc_size_of_0() { + struct Data

{ + phantom: std::marker::PhantomData

, + } + + malloc_size_of_is_0!(any: Data

); + + // MallocSizeOf is not implemented for [u8; 333] + assert_eq!(crate::malloc_size(&Data::<[u8; 333]> { phantom: std::marker::PhantomData }), 0); + } } From 4c087c0e926a5bdfc495097aa6aa1edc7bee35e4 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 7 Feb 2020 19:50:12 +0300 Subject: [PATCH 084/359] Update doc comment (#335) * Update malloc_size.rs * Update parity-util-mem/src/malloc_size.rs Co-Authored-By: Andronik Ordian * fix doc test Co-authored-by: Andronik Ordian --- parity-util-mem/src/malloc_size.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index fccdb9bcd..c68f8f660 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -585,14 +585,15 @@ impl MallocSizeOf for parking_lot::RwLock { /// /// and when no "any: " /// +/// ```rust /// use parity_util_mem::{malloc_size, malloc_size_of_is_0}; /// -/// struct Data { pub T } +/// struct Data(pub T); /// /// // generic argument (`T`) must be `impl MallocSizeOf` /// malloc_size_of_is_0!(Data); /// -/// assert_eq!(malloc_size(&Data(0u8), 0); +/// assert_eq!(malloc_size(&Data(0u8)), 0); /// ``` #[macro_export] macro_rules! 
malloc_size_of_is_0( From 96c29eb185d9aa33ec0ce296192dedcadf7bdb67 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Sat, 8 Feb 2020 19:41:43 +0100 Subject: [PATCH 085/359] [parity crypto]: remove unused depend `rustc_hex` (#337) --- parity-crypto/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 5a7ea3b9b..291d29371 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -30,7 +30,6 @@ pbkdf2 = "0.3.0" subtle = "2.2.1" zeroize = { version = "1.0.0", default-features = false } rand = "0.7.2" -rustc-hex = "2.0" [dev-dependencies] criterion = "0.3.0" From fb349559e00fb505f7fcc0c4748ffd553102eb25 Mon Sep 17 00:00:00 2001 From: David Date: Sat, 8 Feb 2020 21:24:22 +0100 Subject: [PATCH 086/359] [parity-crypto] prepare 0.5.0 (#336) * [parity-crypto] prepare 0.5.0 * Update parity-crypto/CHANGELOG.md Co-Authored-By: Andronik Ordian * Include https://github.com/paritytech/parity-common/pull/337 * Sort out rustc-hex * Make sure we test parity-crypto with the publockey feature enabled Co-authored-by: Andronik Ordian --- .travis.yml | 3 ++- parity-crypto/CHANGELOG.md | 15 +++++++++------ parity-crypto/Cargo.toml | 5 +++-- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index a8741dccc..bea20cf75 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,7 +38,7 @@ script: - cargo check --all --tests - cargo check --all --benches - cargo build --all - - cargo test --all --exclude uint --exclude fixed-hash + - cargo test --all --exclude uint --exclude fixed-hash --exclude parity-crypto - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cd contract-address/ && cargo test --features=external_doc && cd ..; fi @@ -48,6 +48,7 @@ script: - cd keccak-hash/ && cargo test --no-default-features && cd .. - cd plain_hasher/ && cargo test --no-default-features && cargo check --benches && cd .. - cd parity-bytes/ && cargo test --no-default-features && cd .. 
+ - cd parity-crypto/ && cargo test --all-features && cd .. - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. - cd parity-util-mem/ && cargo test --features=jemalloc-global && cd .. - cd parity-util-mem/ && cargo test --features=mimalloc-global && cd .. diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index c915ee3c9..591ee42d3 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,9 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- Remove `inv()` from `SecretKey` (breaking) -- `Generate::generate()` does not return error -- `Secp256k1` is no longer exported -- Remove `public_is_valid()` as it is now impossible to create invalid public keys -- 0-valued `Secp::Message`s are disallowed (signatures on them are forgeable for all keys) -- updates to upstream `rust-secp256k1` at v0.17.2 + +## [0.5.0] - 2020-02-08 +- Remove `inv()` from `SecretKey` (breaking) (https://github.com/paritytech/parity-common/pull/258) +- `Generate::generate()` does not return error (https://github.com/paritytech/parity-common/pull/258) +- `Secp256k1` is no longer exported (https://github.com/paritytech/parity-common/pull/258) +- Remove `public_is_valid()` as it is now impossible to create invalid public keys (https://github.com/paritytech/parity-common/pull/258) +- 0-valued `Secp::Message`s are disallowed (signatures on them are forgeable for all keys) (https://github.com/paritytech/parity-common/pull/258) +- Switch to upstream `rust-secp256k1` at v0.17.2 (https://github.com/paritytech/parity-common/pull/258) +- make `rustc_hex` dependency optional (https://github.com/paritytech/parity-common/pull/337) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 291d29371..aa5d7b654 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.4.2" +version = "0.5.0" 
authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." @@ -30,6 +30,7 @@ pbkdf2 = "0.3.0" subtle = "2.2.1" zeroize = { version = "1.0.0", default-features = false } rand = "0.7.2" +rustc-hex = { version = "2.1.0", default-features = false, optional = true } [dev-dependencies] criterion = "0.3.0" @@ -39,4 +40,4 @@ hex-literal = "0.2.1" default = [] # public key crypto utils # moved from ethkey module in parity ethereum repository -publickey = ["secp256k1", "lazy_static", "ethereum-types"] +publickey = ["secp256k1", "lazy_static", "ethereum-types", "rustc-hex"] From 742827541564cd6a58bc036d7ba9cac560038c0c Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 10 Feb 2020 19:41:23 +0300 Subject: [PATCH 087/359] Disable cache if explicit memory budget=0 passed (#339) * Disable cache if exlipcit memory budget=0 passed * don't warn on block cache property anymore * remove onliner --- kvdb-rocksdb/src/lib.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 40ee0f595..681883c01 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -236,7 +236,11 @@ impl MallocSizeOf for DBAndColumns { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { let mut total = self.column_names.size_of(ops) // we have at least one column always, so we can call property on it - + self.static_property_or_warn(0, "rocksdb.block-cache-usage"); + + self.db + .property_int_value_cf(self.cf(0), "rocksdb.block-cache-usage") + .unwrap_or(Some(0)) + .map(|x| x as usize) + .unwrap_or(0); for v in 0..self.column_names.len() { total += self.static_property_or_warn(v, "rocksdb.estimate-table-readers-mem"); @@ -325,12 +329,16 @@ fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { // Set cache size as recommended by // 
https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size let cache_size = config.memory_budget() / 3; - block_opts.set_lru_cache(cache_size); - // "index and filter blocks will be stored in block cache, together with all other data blocks." - // See: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks - block_opts.set_cache_index_and_filter_blocks(true); - // Don't evict L0 filter/index blocks from the cache - block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + if cache_size == 0 { + block_opts.disable_cache() + } else { + block_opts.set_lru_cache(cache_size); + // "index and filter blocks will be stored in block cache, together with all other data blocks." + // See: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks + block_opts.set_cache_index_and_filter_blocks(true); + // Don't evict L0 filter/index blocks from the cache + block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + } block_opts.set_bloom_filter(10, true); block_opts From fc93b7ebac72a338911c140bca5e95f7c2176bd1 Mon Sep 17 00:00:00 2001 From: Anton Gavrilov Date: Tue, 11 Feb 2020 17:04:33 +0100 Subject: [PATCH 088/359] Parity runtime moved to parity common for publication in crates.io (#271) * Replace `tokio_core` with `tokio` (`ring` -> 0.13) (#9657) * Replace `tokio_core` with `tokio`. * Remove `tokio-core` and replace with `tokio` in - `ethcore/stratum` - `secret_store` - `util/fetch` - `util/reactor` * Bump hyper to 0.12 in - `miner` - `util/fake-fetch` - `util/fetch` - `secret_store` * Bump `jsonrpc-***` to 0.9 in - `parity` - `ethcore/stratum` - `ipfs` - `rpc` - `rpc_client` - `whisper` * Bump `ring` to 0.13 * Use a more graceful shutdown process in `secret_store` tests. * Convert some mutexes to rwlocks in `secret_store`. * Consolidate Tokio Runtime use, remove `CpuPool`. 
* Rename and move the `tokio_reactor` crate (`util/reactor`) to `tokio_runtime` (`util/runtime`). * Rename `EventLoop` to `Runtime`. - Rename `EventLoop::spawn` to `Runtime::with_default_thread_count`. - Add the `Runtime::with_thread_count` method. - Rename `Remote` to `Executor`. * Remove uses of `CpuPool` and spawn all tasks via the `Runtime` executor instead. * Other changes related to `CpuPool` removal: - Remove `Reservations::with_pool`. `::new` now takes an `Executor` as an argument. - Remove `SenderReservations::with_pool`. `::new` now takes an `Executor` as an argument. * Remove secret_store runtimes. (#9888) * Remove the independent runtimes from `KeyServerHttpListener` and `KeyServerCore` and instead require a `parity_runtime::Executor` to be passed upon creation of each. * Remove the `threads` parameter from both `ClusterConfiguration` structs. * Implement the `future::Executor` trait for `parity_runtime::Executor`. * Update tests. - Update the `loop_until` function to instead use a oneshot to signal completion. - Modify the `make_key_servers` function to create and return a runtime. * misc: bump license header to 2019 (#10135) * misc: bump license header to 2019 * misc: remove_duplicate_empty_lines.sh * misc: run license header script * commit cargo lock * Upgrade to jsonrpc v14 (#11151) * Upgrade to jsonrpc v14 Contains https://github.com/paritytech/jsonrpc/pull/495 with good bugfixes to resource usage. * Bump tokio & futures. * Bump even further. * Upgrade tokio to 0.1.22 * Partially revert "Bump tokio & futures." This reverts commit 100907eb91907aa124d856d52374637256118e86. 
* Added README, CHANGELOG and several meta tags in Cargo.toml * Proper pr in changelog * Remove categories tag * Comments and usage fixed * Declare test usage for methods explicitly * Crate name in readme modified, complete removed * Test helpers feature added, functions marked as test only * Processed by fmt tool * Illustrative example added * Sample moved into the separate directory * use examples directory instead of custom crate * Wait till scanning completed * Timeout decreased * Unused methods removed Co-authored-by: Nick Sanders Co-authored-by: 5chdn <5chdn@users.noreply.github.com> Co-authored-by: David Co-authored-by: Nikolay Volf --- Cargo.toml | 1 + runtime/CHANGELOG.MD | 11 +++ runtime/Cargo.toml | 19 ++++ runtime/README.MD | 6 ++ runtime/examples/simple.rs | 41 ++++++++ runtime/src/lib.rs | 198 +++++++++++++++++++++++++++++++++++++ 6 files changed, 276 insertions(+) create mode 100644 runtime/CHANGELOG.MD create mode 100644 runtime/Cargo.toml create mode 100644 runtime/README.MD create mode 100644 runtime/examples/simple.rs create mode 100644 runtime/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 1ba7acb78..15633c072 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,7 @@ members = [ "parity-path", "plain_hasher", "rlp", + "runtime", "transaction-pool", "trace-time", "triehash", diff --git a/runtime/CHANGELOG.MD b/runtime/CHANGELOG.MD new file mode 100644 index 000000000..59d2c7ffe --- /dev/null +++ b/runtime/CHANGELOG.MD @@ -0,0 +1,11 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.1] - 2019-11-25 +### Changed +- Moved to parity common repo, prepared for publishing (https://github.com/paritytech/parity-common/pull/271) diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml new file mode 100644 index 000000000..ca53759a0 --- /dev/null +++ b/runtime/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "parity-runtime" +version = "0.1.1" +authors = ["Parity Technologies "] +edition = "2018" + +description = "Tokio runtime wrapper" +license = "GPL-3.0" +readme = "README.md" +homepage = "https://www.parity.io/" +keywords = ["parity", "runtime", "tokio"] +include = ["Cargo.toml", "src/**/*.rs", "README.md", "CHANGELOG.md"] + +[dependencies] +futures = "0.1" +tokio = "0.1.22" + +[features] +test-helpers = [] \ No newline at end of file diff --git a/runtime/README.MD b/runtime/README.MD new file mode 100644 index 000000000..7cda2a31f --- /dev/null +++ b/runtime/README.MD @@ -0,0 +1,6 @@ +# parity-runtime + +Wrapper over tokio runtime. Provides: +- Customizable runtime with ability to spawn it in different thread models +- Corresponding runtime executor for tasks +- Runtime handle diff --git a/runtime/examples/simple.rs b/runtime/examples/simple.rs new file mode 100644 index 000000000..6448bcbf6 --- /dev/null +++ b/runtime/examples/simple.rs @@ -0,0 +1,41 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Simple example, illustating usage of runtime wrapper. + +use futures::{Future, Stream}; +use parity_runtime::Runtime; +use std::thread::park_timeout; +use std::time::Duration; +use tokio::fs::read_dir; + +/// Read current directory in a future, which is executed in the created runtime +fn main() { + let fut = read_dir(".") + .flatten_stream() + .for_each(|dir| { + println!("{:?}", dir.path()); + Ok(()) + }) + .map_err(|err| { + eprintln!("Error: {:?}", err); + () + }); + let runtime = Runtime::with_default_thread_count(); + runtime.executor().spawn(fut); + let timeout = Duration::from_secs(3); + park_timeout(timeout); +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs new file mode 100644 index 000000000..acb1e4b6e --- /dev/null +++ b/runtime/src/lib.rs @@ -0,0 +1,198 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +//! Tokio Runtime wrapper. + +use futures::{future, Future, IntoFuture}; +use std::sync::mpsc; +use std::{fmt, thread}; +pub use tokio::runtime::{Builder as TokioRuntimeBuilder, Runtime as TokioRuntime, TaskExecutor}; +pub use tokio::timer::Delay; + +/// Runtime for futures. +/// +/// Runs in a separate thread. 
+pub struct Runtime { + executor: Executor, + handle: RuntimeHandle, +} + +impl Runtime { + fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self { + let mut runtime = runtime_bldr.build().expect( + "Building a Tokio runtime will only fail when mio components \ + cannot be initialized (catastrophic)", + ); + let (stop, stopped) = futures::oneshot(); + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + tx.send(runtime.executor()).expect("Rx is blocking upper thread."); + runtime + .block_on(futures::empty().select(stopped).map(|_| ()).map_err(|_| ())) + .expect("Tokio runtime should not have unhandled errors."); + }); + let executor = rx.recv().expect("tx is transfered to a newly spawned thread."); + + Runtime { + executor: Executor { inner: Mode::Tokio(executor) }, + handle: RuntimeHandle { close: Some(stop), handle: Some(handle) }, + } + } + + /// Spawns a new tokio runtime with a default thread count on a background + /// thread and returns a `Runtime` which can be used to spawn tasks via + /// its executor. + pub fn with_default_thread_count() -> Self { + let mut runtime_bldr = TokioRuntimeBuilder::new(); + Self::new(&mut runtime_bldr) + } + + /// Spawns a new tokio runtime with a the specified thread count on a + /// background thread and returns a `Runtime` which can be used to spawn + /// tasks via its executor. + #[cfg(any(test, feature = "test-helpers"))] + pub fn with_thread_count(thread_count: usize) -> Self { + let mut runtime_bldr = TokioRuntimeBuilder::new(); + runtime_bldr.core_threads(thread_count); + + Self::new(&mut runtime_bldr) + } + + /// Returns this runtime raw executor. + #[cfg(any(test, feature = "test-helpers"))] + pub fn raw_executor(&self) -> TaskExecutor { + if let Mode::Tokio(ref executor) = self.executor.inner { + executor.clone() + } else { + panic!("Runtime is not initialized in Tokio mode.") + } + } + + /// Returns runtime executor. 
+ pub fn executor(&self) -> Executor { + self.executor.clone() + } +} + +#[derive(Clone)] +enum Mode { + Tokio(TaskExecutor), + // Mode used in tests + #[allow(dead_code)] + Sync, + // Mode used in tests + #[allow(dead_code)] + ThreadPerFuture, +} + +impl fmt::Debug for Mode { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::Mode::*; + + match *self { + Tokio(_) => write!(fmt, "tokio"), + Sync => write!(fmt, "synchronous"), + ThreadPerFuture => write!(fmt, "thread per future"), + } + } +} + +#[derive(Debug, Clone)] +pub struct Executor { + inner: Mode, +} + +impl Executor { + /// Synchronous executor, used for tests. + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_sync() -> Self { + Executor { inner: Mode::Sync } + } + + /// Spawns a new thread for each future (use only for tests). + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_thread_per_future() -> Self { + Executor { inner: Mode::ThreadPerFuture } + } + + /// Spawn a future on this runtime + pub fn spawn(&self, r: R) + where + R: IntoFuture + Send + 'static, + R::Future: Send + 'static, + { + match self.inner { + Mode::Tokio(ref executor) => executor.spawn(r.into_future()), + Mode::Sync => { + let _ = r.into_future().wait(); + } + Mode::ThreadPerFuture => { + thread::spawn(move || { + let _ = r.into_future().wait(); + }); + } + } + } +} + +impl + Send + 'static> future::Executor for Executor { + fn execute(&self, future: F) -> Result<(), future::ExecuteError> { + match self.inner { + Mode::Tokio(ref executor) => executor.execute(future), + Mode::Sync => { + let _ = future.wait(); + Ok(()) + } + Mode::ThreadPerFuture => { + thread::spawn(move || { + let _ = future.wait(); + }); + Ok(()) + } + } + } +} + +/// A handle to a runtime. Dropping the handle will cause runtime to shutdown. 
+pub struct RuntimeHandle { + close: Option>, + handle: Option>, +} + +impl From for RuntimeHandle { + fn from(el: Runtime) -> Self { + el.handle + } +} + +impl Drop for RuntimeHandle { + fn drop(&mut self) { + self.close.take().map(|v| v.send(())); + } +} + +impl RuntimeHandle { + /// Blocks current thread and waits until the runtime is finished. + pub fn wait(mut self) -> thread::Result<()> { + self.handle.take().expect("Handle is taken only in `wait`, `wait` is consuming; qed").join() + } + + /// Finishes this runtime. + pub fn close(mut self) { + let _ = + self.close.take().expect("Close is taken only in `close` and `drop`. `close` is consuming; qed").send(()); + } +} From 5b84cd9dde1584299abbf343cb5d129615682d54 Mon Sep 17 00:00:00 2001 From: Anton Gavrilov Date: Tue, 11 Feb 2020 18:12:13 +0100 Subject: [PATCH 089/359] Format for readme and changelog corrected (#341) --- runtime/{CHANGELOG.MD => CHANGELOG.md} | 0 runtime/{README.MD => README.md} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename runtime/{CHANGELOG.MD => CHANGELOG.md} (100%) rename runtime/{README.MD => README.md} (100%) diff --git a/runtime/CHANGELOG.MD b/runtime/CHANGELOG.md similarity index 100% rename from runtime/CHANGELOG.MD rename to runtime/CHANGELOG.md diff --git a/runtime/README.MD b/runtime/README.md similarity index 100% rename from runtime/README.MD rename to runtime/README.md From 9091bf03d313b5999fb91119d7115886601f1b1a Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 12 Feb 2020 18:33:06 +0100 Subject: [PATCH 090/359] rlp-derive extracted (#343) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move a bunch of stuff around (#10101) * Move devtools. * Merge stop_guard & rename memzero * Move price-info to miner. * Group account management * Clean up workspace members. * Move local store closer to miner. * Move clib examples. * Move registrar and hash-fetch * Move rpc_cli/rpc_client * Move stratum closer to miner. 
* Fix naming convention of crates. * Update Cpp examples path. * Fix paths for clib-example. * Fix removing build. * misc: bump license header to 2019 (#10135) * misc: bump license header to 2019 * misc: remove_duplicate_empty_lines.sh * misc: run license header script * commit cargo lock * Upgrade ethereum types (#10670) * cargo upgrade "ethereum-types" --all --allow-prerelease * [ethash] fix compilation errors * [ethkey] fix compilation errors * [journaldb] fix compilation errors * [dir] fix compilation errors * [ethabi] update to 0.7 * wip * [eip-712] fix compilation errors * [ethjson] fix compilation errors * [Cargo.toml] add TODO to remove patches * [ethstore] fix compilation errors * use patched keccak-hash with new primitive-types * wip * [ethcore-network-devp2p] fix compilation errors * [vm] fix compilation errors * [common-types, evm, wasm] fix compilation errors * [ethcore-db] Require AsRef instead of Deref for keys * [ethcore-blockchain] fix some compilation errors * [blooms-db] fix compilation errors Thanks a lot @dvdplm :) * we don't need no rlp ethereum feature * [ethcore] fix some compilation errors * [parity-ipfs-api] fix compilation error * [ethcore-light] fix compilation errors * [Cargo.lock] update parity-common * [ethcore-private-tx] fix some compilation errors * wip * [ethcore-private-tx] fix compilation errors * [parity-updater] fix compilation errors * [parity-rpc] fix compilation errors * [parity-bin] fix other compilation errors * update to new ethereum-types * update keccak-hash * [fastmap] fix compilation in tests * [blooms-db] fix compilation in tests * [common-types] fix compilation in tests * [triehash-ethereum] fix compilation in tests * [ethkey] fix compilation in tests * [pwasm-run-test] fix compilation errors * [wasm] fix compilation errors * [ethjson] fix compilation in tests * [eip-712] fix compilation in tests * [ethcore-blockchain] fix compilation in tests * [ethstore] fix compilation in tests * [ethstore-accounts] fix 
compilation in tests * [parity-hash-fetch] fix compilation in tests * [parity-whisper] fix compilation in tests * [ethcore-miner] fix compilation in tests * [ethcore-network-devp2p] fix compilation in tests * [*] upgrade rand to 0.6 * [evm] get rid of num-bigint conversions * [ethcore] downgrade trie-standardmap and criterion * [ethcore] fix some warnings * [ethcore] fix compilation in tests * [evmbin] fix compilation in tests * [updater] fix compilation in tests * [ethash] fix compilation in tests * [ethcore-secretstore] fix compilation in tests * [ethcore-sync] fix compilation in tests * [parity-rpc] fix compilation in tests * [ethcore] finally fix compilation in tests FUCK YEAH!!! * [ethstore] lazy_static is unused * [ethcore] fix test * fix up bad merge * [Cargo.toml] remove unused patches * [*] replace some git dependencies with crates.io * [Cargo.toml] remove unused lazy_static * [*] clean up * [ethcore] fix transaction_filter_deprecated test * [private-tx] fix serialization tests * fix more serialization tests * [ethkey] fix smoky test * [rpc] fix tests, please? 
* [ethcore] remove commented out code * Apply suggestions from code review Co-Authored-By: Tomasz Drwięga * [ethstore] remove unused dev-dependency * [ethcore] remove resolved TODO * [*] resolve keccak-hash TODO * [*] s/Address::default()/Address::zero() * [rpc] remove Subscribers::new_test * [rpc] remove EthPubSubClient::new_test * [ethcore] use trie-standardmap from crates.io * [dir] fix db_root_path * [ethcore] simplify snapshot::tests::helpers::fill_storage * Apply suggestions from code review Co-Authored-By: David * [ethcore-secretstore] resolve TODO in serialization * [ethcore-network-devp2p] resolve TODO in save_key * [Cargo.lock] update triehash * [*] use ethabi from crates.io * [ethkey] use secp256k1 from master branch * [Cargo.lock] update eth-secp256k1 * Update copyright notice 2020 (#11386) * Update copyright noticed 2020 * Update copyright in two overlooked files * rlp_derive: cleanup (#11446) * rlp_derive: update syn & co * rlp_derive: remove dummy_const * rlp_derive: remove unused attirubutes * rlp-derive: change authors * backwards compatible call_type creation_method (#11450) * rlp_derive: update syn & co * rlp_derive: remove dummy_const * rlp_derive: remove unused attirubutes * rlp-derive: change authors * rlp_derive: add rlp(default) attribute * Revert "Revert "[Trace] Distinguish between `create` and `create2` (#11311)" (#11427)" This reverts commit 5d4993b0f856bf9e0e2c78849b72e581f0cde686. * trace: backwards compatible call_type and creation_method * trace: add rlp backward compatibility tests * cleanup * i know, i hate backwards compatibility too * address review grumbles * rlp-derive: change license and add a changelog * rlp-derive: tests license header as well * add rlp-derive to workspace * rename to rlp-derive * remove unnecessary line * rlp-derive: more module docs * cargo fmt * trigger the ci * Revert "trigger the ci" This reverts commit 5f21a4f30f38996eaadec68d0accc67f9122a6ad. 
Co-authored-by: Tomasz Drwięga Co-authored-by: 5chdn <5chdn@users.noreply.github.com> Co-authored-by: s3krit --- Cargo.toml | 1 + rlp-derive/CHANGELOG.md | 10 +++ rlp-derive/Cargo.toml | 19 +++++ rlp-derive/src/de.rs | 163 ++++++++++++++++++++++++++++++++++++++++ rlp-derive/src/en.rs | 109 +++++++++++++++++++++++++++ rlp-derive/src/lib.rs | 54 +++++++++++++ rlp-derive/tests/rlp.rs | 71 +++++++++++++++++ 7 files changed, 427 insertions(+) create mode 100644 rlp-derive/CHANGELOG.md create mode 100644 rlp-derive/Cargo.toml create mode 100644 rlp-derive/src/de.rs create mode 100644 rlp-derive/src/en.rs create mode 100644 rlp-derive/src/lib.rs create mode 100644 rlp-derive/tests/rlp.rs diff --git a/Cargo.toml b/Cargo.toml index 15633c072..019b989de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,7 @@ members = [ "parity-path", "plain_hasher", "rlp", + "rlp-derive", "runtime", "transaction-pool", "trace-time", diff --git a/rlp-derive/CHANGELOG.md b/rlp-derive/CHANGELOG.md new file mode 100644 index 000000000..592d3fbf9 --- /dev/null +++ b/rlp-derive/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] + +## [0.1.0] - 2020-02-12 +- Extracted from parity-ethereum repo. 
[#343](https://github.com/paritytech/parity-common/pull/343) diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml new file mode 100644 index 000000000..bf4d0eaf0 --- /dev/null +++ b/rlp-derive/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "rlp-derive" +version = "0.1.0" +authors = ["Parity Technologies "] +license = "MIT/Apache-2.0" +description = "Derive macro for #[derive(RlpEncodable, RlpDecodable)]" +homepage = "http://parity.io" +edition = "2018" + +[lib] +proc-macro = true + +[dependencies] +syn = "1.0.14" +quote = "1.0.2" +proc-macro2 = "1.0.8" + +[dev-dependencies] +rlp = "0.4.0" diff --git a/rlp-derive/src/de.rs b/rlp-derive/src/de.rs new file mode 100644 index 000000000..d1b4e4ca5 --- /dev/null +++ b/rlp-derive/src/de.rs @@ -0,0 +1,163 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use proc_macro2::TokenStream; +use quote::quote; + +struct ParseQuotes { + single: TokenStream, + list: TokenStream, + takes_index: bool, +} + +fn decodable_parse_quotes() -> ParseQuotes { + ParseQuotes { single: quote! { rlp.val_at }, list: quote! { rlp.list_at }, takes_index: true } +} + +fn decodable_wrapper_parse_quotes() -> ParseQuotes { + ParseQuotes { single: quote! { rlp.as_val }, list: quote! { rlp.as_list }, takes_index: false } +} + +pub fn impl_decodable(ast: &syn::DeriveInput) -> TokenStream { + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpDecodable)] is only defined for structs."), + }; + + let mut default_attribute_encountered = false; + let stmts: Vec<_> = body + .fields + .iter() + .enumerate() + .map(|(i, field)| decodable_field(i, field, decodable_parse_quotes(), &mut default_attribute_encountered)) + .collect(); + let name = &ast.ident; + + let impl_block = quote! 
{ + impl rlp::Decodable for #name { + fn decode(rlp: &rlp::Rlp) -> Result { + let result = #name { + #(#stmts)* + }; + + Ok(result) + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +pub fn impl_decodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpDecodableWrapper)] is only defined for structs."), + }; + + let stmt = { + let fields: Vec<_> = body.fields.iter().collect(); + if fields.len() == 1 { + let field = fields.first().expect("fields.len() == 1; qed"); + let mut default_attribute_encountered = false; + decodable_field(0, field, decodable_wrapper_parse_quotes(), &mut default_attribute_encountered) + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") + } + }; + + let name = &ast.ident; + + let impl_block = quote! { + impl rlp::Decodable for #name { + fn decode(rlp: &rlp::Rlp) -> Result { + let result = #name { + #stmt + }; + + Ok(result) + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +fn decodable_field( + index: usize, + field: &syn::Field, + quotes: ParseQuotes, + default_attribute_encountered: &mut bool, +) -> TokenStream { + let id = match field.ident { + Some(ref ident) => quote! { #ident }, + None => { + let index: syn::Index = index.into(); + quote! { #index } + } + }; + + let index = index - *default_attribute_encountered as usize; + let index = quote! 
{ #index }; + + let single = quotes.single; + let list = quotes.list; + + let attributes = &field.attrs; + let default = if let Some(attr) = attributes.iter().find(|attr| attr.path.is_ident("rlp")) { + if *default_attribute_encountered { + panic!("only 1 #[rlp(default)] attribute is allowed in a struct") + } + match attr.parse_args() { + Ok(proc_macro2::TokenTree::Ident(ident)) if ident.to_string() == "default" => {} + _ => panic!("only #[rlp(default)] attribute is supported"), + } + *default_attribute_encountered = true; + true + } else { + false + }; + + match field.ty { + syn::Type::Path(ref path) => { + let ident = &path.path.segments.first().expect("there must be at least 1 segment").ident; + let ident_type = ident.to_string(); + if &ident_type == "Vec" { + if quotes.takes_index { + if default { + quote! { #id: #list(#index).unwrap_or_default(), } + } else { + quote! { #id: #list(#index)?, } + } + } else { + quote! { #id: #list()?, } + } + } else { + if quotes.takes_index { + if default { + quote! { #id: #single(#index).unwrap_or_default(), } + } else { + quote! { #id: #single(#index)?, } + } + } else { + quote! { #id: #single()?, } + } + } + } + _ => panic!("rlp_derive not supported"), + } +} diff --git a/rlp-derive/src/en.rs b/rlp-derive/src/en.rs new file mode 100644 index 000000000..9eb0d6afb --- /dev/null +++ b/rlp-derive/src/en.rs @@ -0,0 +1,109 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use proc_macro2::TokenStream; +use quote::quote; + +pub fn impl_encodable(ast: &syn::DeriveInput) -> TokenStream { + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpEncodable)] is only defined for structs."), + }; + + let stmts: Vec<_> = body.fields.iter().enumerate().map(|(i, field)| encodable_field(i, field)).collect(); + let name = &ast.ident; + + let stmts_len = stmts.len(); + let stmts_len = quote! { #stmts_len }; + let impl_block = quote! { + impl rlp::Encodable for #name { + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + stream.begin_list(#stmts_len); + #(#stmts)* + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +pub fn impl_encodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { + let body = match ast.data { + syn::Data::Struct(ref s) => s, + _ => panic!("#[derive(RlpEncodableWrapper)] is only defined for structs."), + }; + + let stmt = { + let fields: Vec<_> = body.fields.iter().collect(); + if fields.len() == 1 { + let field = fields.first().expect("fields.len() == 1; qed"); + encodable_field(0, field) + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined for structs with one field.") + } + }; + + let name = &ast.ident; + + let impl_block = quote! { + impl rlp::Encodable for #name { + fn rlp_append(&self, stream: &mut rlp::RlpStream) { + #stmt + } + } + }; + + quote! { + const _: () = { + extern crate rlp; + #impl_block + }; + } +} + +fn encodable_field(index: usize, field: &syn::Field) -> TokenStream { + let ident = match field.ident { + Some(ref ident) => quote! { #ident }, + None => { + let index: syn::Index = index.into(); + quote! { #index } + } + }; + + let id = quote! 
{ self.#ident }; + + match field.ty { + syn::Type::Path(ref path) => { + let top_segment = path.path.segments.first().expect("there must be at least 1 segment"); + let ident = &top_segment.ident; + if &ident.to_string() == "Vec" { + let inner_ident = match top_segment.arguments { + syn::PathArguments::AngleBracketed(ref angle) => { + let ty = angle.args.first().expect("Vec has only one angle bracketed type; qed"); + match *ty { + syn::GenericArgument::Type(syn::Type::Path(ref path)) => { + &path.path.segments.first().expect("there must be at least 1 segment").ident + } + _ => panic!("rlp_derive not supported"), + } + } + _ => unreachable!("Vec has only one angle bracketed type; qed"), + }; + quote! { stream.append_list::<#inner_ident, _>(&#id); } + } else { + quote! { stream.append(&#id); } + } + } + _ => panic!("rlp_derive not supported"), + } +} diff --git a/rlp-derive/src/lib.rs b/rlp-derive/src/lib.rs new file mode 100644 index 000000000..47efd2ffe --- /dev/null +++ b/rlp-derive/src/lib.rs @@ -0,0 +1,54 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Derive macro for `#[derive(RlpEncodable, RlpDecodable)]`. +//! +//! For example of usage see `./tests/rlp.rs`. +//! +//! This library also supports up to 1 `#[rlp(default)]` in a struct, +//! which is similar to [`#[serde(default)]`](https://serde.rs/field-attrs.html#default) +//! with the caveat that we use the `Default` value if +//! the field deserialization fails, as we don't serialize field +//! names and there is no way to tell if it is present or not. 
+ +extern crate proc_macro; + +mod de; +mod en; + +use de::{impl_decodable, impl_decodable_wrapper}; +use en::{impl_encodable, impl_encodable_wrapper}; +use proc_macro::TokenStream; + +#[proc_macro_derive(RlpEncodable, attributes(rlp))] +pub fn encodable(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_encodable(&ast); + gen.into() +} + +#[proc_macro_derive(RlpEncodableWrapper)] +pub fn encodable_wrapper(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_encodable_wrapper(&ast); + gen.into() +} + +#[proc_macro_derive(RlpDecodable, attributes(rlp))] +pub fn decodable(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_decodable(&ast); + gen.into() +} + +#[proc_macro_derive(RlpDecodableWrapper)] +pub fn decodable_wrapper(input: TokenStream) -> TokenStream { + let ast = syn::parse(input).unwrap(); + let gen = impl_decodable_wrapper(&ast); + gen.into() +} diff --git a/rlp-derive/tests/rlp.rs b/rlp-derive/tests/rlp.rs new file mode 100644 index 000000000..e3cda4dbc --- /dev/null +++ b/rlp-derive/tests/rlp.rs @@ -0,0 +1,71 @@ +// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rlp::{decode, encode}; +use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; + +#[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)] +struct Foo { + a: String, +} + +#[derive(Debug, PartialEq, RlpEncodableWrapper, RlpDecodableWrapper)] +struct FooWrapper { + a: String, +} + +#[test] +fn test_encode_foo() { + let foo = Foo { a: "cat".into() }; + + let expected = vec![0xc4, 0x83, b'c', b'a', b't']; + let out = encode(&foo); + assert_eq!(out, expected); + + let decoded = decode(&expected).expect("decode failure"); + assert_eq!(foo, decoded); +} + +#[test] +fn test_encode_foo_wrapper() { + let foo = FooWrapper { a: "cat".into() }; + + let expected = vec![0x83, b'c', b'a', b't']; + let out = encode(&foo); + assert_eq!(out, expected); + + let decoded = decode(&expected).expect("decode failure"); + assert_eq!(foo, decoded); +} + +#[test] +fn test_encode_foo_default() { + #[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)] + struct FooDefault { + a: String, + /// It works with other attributes. 
+ #[rlp(default)] + b: Option>, + } + + let attack_of = String::from("clones"); + let foo = Foo { a: attack_of.clone() }; + + let expected = vec![0xc7, 0x86, b'c', b'l', b'o', b'n', b'e', b's']; + let out = encode(&foo); + assert_eq!(out, expected); + + let foo_default = FooDefault { a: attack_of.clone(), b: None }; + + let decoded = decode(&expected).expect("default failure"); + assert_eq!(foo_default, decoded); + + let foo_some = FooDefault { a: attack_of.clone(), b: Some(vec![1, 2, 3]) }; + let out = encode(&foo_some); + assert_eq!(decode(&out), Ok(foo_some)); +} From 46d8f6b5419edea4c7b2d46a09ecf3bee3facc7b Mon Sep 17 00:00:00 2001 From: David Date: Wed, 12 Feb 2020 23:31:29 +0100 Subject: [PATCH 091/359] Update/change licenses: MIT/Apache2.0 (#342) * Change license for all GPL crates to dual MIT/Apache2.0 license Change license for all MIT crates to dual MIT/Apache2.0 license Normalize "license" key in all Cargo.tomls: MIT/Apache2.0 Add MIT and Apache2.0 licenses to the repo root * License headers * Single year is enough * Add note to changelog * Review grumbles. 
* typo --- LICENSE | 674 ------------------ rlp/LICENSE-APACHE2 => LICENSE-APACHE2 | 0 rlp/LICENSE-MIT => LICENSE-MIT | 2 +- contract-address/CHANGELOG.md | 1 + contract-address/Cargo.toml | 2 +- contract-address/src/lib.rs | 22 +- ethbloom/CHANGELOG.md | 1 + ethbloom/Cargo.toml | 2 +- ethbloom/benches/bloom.rs | 8 + ethbloom/benches/unrolling.rs | 8 + ethbloom/src/lib.rs | 8 + ethereum-types/CHANGELOG.md | 2 + ethereum-types/Cargo.toml | 2 +- ethereum-types/src/hash.rs | 8 + ethereum-types/src/lib.rs | 8 + ethereum-types/src/uint.rs | 8 + ethereum-types/tests/serde.rs | 2 +- fixed-hash/CHANGELOG.md | 1 + fixed-hash/Cargo.toml | 2 +- fixed-hash/src/hash.rs | 2 +- fixed-hash/src/lib.rs | 2 +- fixed-hash/src/tests.rs | 8 + keccak-hash/CHANGELOG.md | 1 + keccak-hash/Cargo.toml | 2 +- keccak-hash/benches/keccak_256.rs | 22 +- keccak-hash/src/lib.rs | 22 +- kvdb-memorydb/CHANGELOG.md | 1 + kvdb-memorydb/Cargo.toml | 2 +- kvdb-memorydb/src/lib.rs | 22 +- kvdb-rocksdb/CHANGELOG.md | 1 + kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/benches/bench_read_perf.rs | 22 +- kvdb-rocksdb/src/iter.rs | 22 +- kvdb-rocksdb/src/lib.rs | 22 +- kvdb-rocksdb/src/stats.rs | 22 +- kvdb-shared-tests/Cargo.toml | 2 +- kvdb-shared-tests/src/lib.rs | 22 +- kvdb-web/CHANGELOG.md | 1 + kvdb-web/Cargo.toml | 2 +- kvdb-web/src/error.rs | 22 +- kvdb-web/src/indexed_db.rs | 22 +- kvdb-web/src/lib.rs | 22 +- kvdb-web/tests/indexed_db.rs | 22 +- kvdb/CHANGELOG.md | 1 + kvdb/Cargo.toml | 2 +- kvdb/src/io_stats.rs | 22 +- kvdb/src/lib.rs | 22 +- rlp/license-header => license-header | 2 +- parity-bytes/CHANGELOG.md | 1 + parity-bytes/Cargo.toml | 2 +- parity-bytes/src/lib.rs | 22 +- parity-crypto/CHANGELOG.md | 1 + parity-crypto/Cargo.toml | 2 +- parity-crypto/benches/bench.rs | 22 +- parity-crypto/src/aes.rs | 22 +- parity-crypto/src/digest.rs | 22 +- parity-crypto/src/error.rs | 22 +- parity-crypto/src/hmac/mod.rs | 22 +- parity-crypto/src/hmac/test.rs | 22 +- parity-crypto/src/lib.rs | 22 +- 
parity-crypto/src/pbkdf2/mod.rs | 22 +- parity-crypto/src/pbkdf2/test.rs | 22 +- parity-crypto/src/publickey/ec_math_utils.rs | 22 +- parity-crypto/src/publickey/ecdh.rs | 22 +- .../src/publickey/ecdsa_signature.rs | 22 +- parity-crypto/src/publickey/ecies.rs | 22 +- parity-crypto/src/publickey/error.rs | 22 +- parity-crypto/src/publickey/extended_keys.rs | 22 +- parity-crypto/src/publickey/keypair.rs | 22 +- .../src/publickey/keypair_generator.rs | 22 +- parity-crypto/src/publickey/mod.rs | 22 +- parity-crypto/src/publickey/secret_key.rs | 22 +- parity-crypto/src/scrypt.rs | 22 +- parity-path/CHANGELOG.md | 1 + parity-path/Cargo.toml | 2 +- parity-path/src/lib.rs | 22 +- parity-util-mem/CHANGELOG.md | 1 + parity-util-mem/Cargo.toml | 4 +- parity-util-mem/derive/Cargo.toml | 2 +- parity-util-mem/derive/lib.rs | 22 +- parity-util-mem/src/allocators.rs | 22 +- parity-util-mem/src/ethereum_impls.rs | 22 +- parity-util-mem/src/lib.rs | 22 +- parity-util-mem/src/primitives_impls.rs | 22 +- parity-util-mem/src/sizeof.rs | 22 +- parity-util-mem/tests/derive.rs | 22 +- plain_hasher/CHANGELOG.md | 1 + plain_hasher/Cargo.toml | 2 +- plain_hasher/benches/bench.rs | 22 +- plain_hasher/src/lib.rs | 22 +- primitive-types/Cargo.toml | 2 +- primitive-types/impls/codec/Cargo.toml | 2 +- primitive-types/impls/codec/src/lib.rs | 2 +- primitive-types/impls/rlp/Cargo.toml | 2 +- primitive-types/impls/rlp/src/lib.rs | 2 +- primitive-types/impls/serde/Cargo.toml | 2 +- .../impls/serde/benches/impl_serde.rs | 2 +- primitive-types/impls/serde/benches/input.rs | 8 + primitive-types/impls/serde/src/lib.rs | 2 +- primitive-types/impls/serde/src/serialize.rs | 2 +- primitive-types/src/lib.rs | 2 +- rlp/benches/rlp.rs | 2 +- rlp/src/error.rs | 2 +- rlp/src/impls.rs | 2 +- rlp/src/lib.rs | 2 +- rlp/src/rlpin.rs | 2 +- rlp/src/stream.rs | 2 +- rlp/src/traits.rs | 2 +- rlp/tests/tests.rs | 2 +- trace-time/CHANGELOG.md | 1 + trace-time/Cargo.toml | 2 +- trace-time/src/lib.rs | 22 +- 
transaction-pool/CHANGELOG.md | 1 + transaction-pool/Cargo.toml | 2 +- transaction-pool/src/error.rs | 22 +- transaction-pool/src/lib.rs | 22 +- transaction-pool/src/listener.rs | 22 +- transaction-pool/src/options.rs | 22 +- transaction-pool/src/pool.rs | 22 +- transaction-pool/src/ready.rs | 22 +- transaction-pool/src/replace.rs | 22 +- transaction-pool/src/scoring.rs | 22 +- transaction-pool/src/status.rs | 22 +- transaction-pool/src/tests/helpers.rs | 22 +- transaction-pool/src/tests/mod.rs | 22 +- transaction-pool/src/tests/tx_builder.rs | 22 +- transaction-pool/src/transactions.rs | 22 +- transaction-pool/src/verifier.rs | 22 +- triehash/CHANGELOG.md | 1 + triehash/Cargo.toml | 2 +- triehash/benches/triehash.rs | 22 +- triehash/src/lib.rs | 22 +- uint/benches/bigint.rs | 2 +- uint/examples/modular.rs | 2 +- uint/fuzz/fuzz_targets/div_mod.rs | 8 + uint/fuzz/fuzz_targets/div_mod_word.rs | 8 + uint/src/lib.rs | 2 +- uint/src/uint.rs | 2 +- uint/tests/uint_tests.rs | 2 +- 139 files changed, 587 insertions(+), 1667 deletions(-) delete mode 100644 LICENSE rename rlp/LICENSE-APACHE2 => LICENSE-APACHE2 (100%) rename rlp/LICENSE-MIT => LICENSE-MIT (95%) rename rlp/license-header => license-header (87%) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 94a9ed024..000000000 --- a/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. 
By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. 
- - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. 
The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. 
This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. 
For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. 
Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. 
- - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. 
Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/rlp/LICENSE-APACHE2 b/LICENSE-APACHE2 similarity index 100% rename from rlp/LICENSE-APACHE2 rename to LICENSE-APACHE2 diff --git a/rlp/LICENSE-MIT b/LICENSE-MIT similarity index 95% rename from rlp/LICENSE-MIT rename to LICENSE-MIT index cd8fdd2b9..b2d52b66d 100644 --- a/rlp/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright (c) 2015-2017 Parity Technologies +Copyright (c) 2015-2020 Parity Technologies Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md index 927c9dc9c..546bc458e 100644 --- a/contract-address/CHANGELOG.md +++ b/contract-address/CHANGELOG.md @@ -5,3 +5,4 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 8b4f05682..21486e7a2 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -2,7 +2,7 @@ name = "contract-address" version = "0.3.0" authors = ["Parity Technologies "] -license = "MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" description = "A utility crate to create an ethereum contract address" diff --git a/contract-address/src/lib.rs b/contract-address/src/lib.rs index 1d4068d7d..787f8b06f 100644 --- a/contract-address/src/lib.rs +++ b/contract-address/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. 
- -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. #![cfg_attr(feature = "external_doc", feature(external_doc))] #![cfg_attr(feature = "external_doc", doc(include = "../README.md"))] diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 1edf138b9..677e06714 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) +- License changed from MIT to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) ## [0.8.1] - 2019-10-24 ### Dependencies diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index f9f9960fb..cbf9112a0 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -3,7 +3,7 @@ name = "ethbloom" version = "0.8.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" -license = "MIT" +license = "MIT/Apache2.0" documentation = "https://docs.rs/ethbloom" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" diff --git a/ethbloom/benches/bloom.rs b/ethbloom/benches/bloom.rs index 005cfd88f..f3de3b7b0 100644 --- a/ethbloom/benches/bloom.rs +++ b/ethbloom/benches/bloom.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use criterion::{criterion_group, criterion_main, Criterion}; use ethbloom::{Bloom, Input}; use hex_literal::hex; diff --git a/ethbloom/benches/unrolling.rs b/ethbloom/benches/unrolling.rs index 5fd6883ce..647528eb9 100644 --- a/ethbloom/benches/unrolling.rs +++ b/ethbloom/benches/unrolling.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use criterion::{criterion_group, criterion_main, Criterion}; use crunchy::unroll; use rand::RngCore; diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 49a18214f..c12f5cab8 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + //! //! ``` //! use hex_literal::hex; diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 6fad76681..1e06ff2b0 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,5 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) + ### Added - uint error type is re-exported (https://github.com/paritytech/parity-common/pull/244) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 8153ea823..b706f2873 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -2,7 +2,7 @@ name = "ethereum-types" version = "0.8.0" authors = ["Parity Technologies "] -license = "MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" description = "Ethereum types" edition = "2018" diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 9fe385c5b..9e2057dec 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use crate::{U128, U256, U512, U64}; use fixed_hash::*; use impl_rlp::impl_fixed_hash_rlp; diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index 9bc756ba0..d94ae57c0 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ #![cfg_attr(not(feature = "std"), no_std)] mod hash; diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index 5cf35feae..87d09541c 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use impl_rlp::impl_uint_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_uint_serde; diff --git a/ethereum-types/tests/serde.rs b/ethereum-types/tests/serde.rs index 4a92241cf..15796c3c7 100644 --- a/ethereum-types/tests/serde.rs +++ b/ethereum-types/tests/serde.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index adf179a5e..cfea43299 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) +- License changed from MIT to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.2] - 2019-12-19 ### Fixed diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index c12d35c57..41bf7d237 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -2,7 +2,7 @@ name = "fixed-hash" version = "0.5.2" authors = ["Parity Technologies "] -license = "MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" description = "Macros to define custom fixed-size hash types" diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 46f5c7748..e7fdf0e4c 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 027ba2284..748101c9f 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index da14bed13..33c1956b7 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + construct_fixed_hash! { pub struct H32(4); } construct_fixed_hash! { pub struct H64(8); } construct_fixed_hash! { pub struct H128(16); } diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index c1208e434..4b6c6795a 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.4.1] - 2019-10-24 ### Dependencies diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 3d7d88943..49c9506dd 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -5,7 +5,7 @@ description = "`keccak-hash` is a set of utility functions to facilitate working authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" readme = "README.md" -license = "GPL-3.0" +license = "MIT/Apache-2.0" edition = "2018" [dependencies] diff --git a/keccak-hash/benches/keccak_256.rs b/keccak-hash/benches/keccak_256.rs index 5c5794bf5..97e9ee13d 100644 --- a/keccak-hash/benches/keccak_256.rs +++ b/keccak-hash/benches/keccak_256.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
use criterion::{black_box, criterion_group, criterion_main, Criterion}; use keccak_hash::keccak; diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index e66650b39..e9f410672 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index f2f80a709..cb6163d43 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.4.0] - 2019-02-05 - Bump parking_lot to 0.10. 
[#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 954a1e117..24d6a9991 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" -license = "GPL-3.0" +license = "MIT/Apache-2.0" edition = "2018" [dependencies] diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 1f40d24cc..51bac41f0 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; use parity_util_mem::MallocSizeOf; diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 10710343c..ac4c2d558 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.0] - 2019-02-05 - Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332 diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 2f67b8d99..1a51844e9 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" -license = "GPL-3.0" +license = "MIT/Apache-2.0" edition = "2018" [[bench]] diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs index 3b4cde3cf..8c17d8981 100644 --- a/kvdb-rocksdb/benches/bench_read_perf.rs +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Benchmark RocksDB read performance. //! 
The benchmark setup consists in writing `NEEDLES * NEEDLES_TO_HAYSTACK_RATIO` 32-bytes random diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 881aa724e..71482f446 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -1,18 +1,10 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! This module contains an implementation of a RocksDB iterator //! wrapped inside a `RwLock`. Since `RwLock` "owns" the inner data, diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 681883c01..42ee1388c 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. mod iter; mod stats; diff --git a/kvdb-rocksdb/src/stats.rs b/kvdb-rocksdb/src/stats.rs index 039dd3a88..80fa85ce2 100644 --- a/kvdb-rocksdb/src/stats.rs +++ b/kvdb-rocksdb/src/stats.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
use parking_lot::RwLock; use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 91259e851..367eef8b6 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -4,7 +4,7 @@ version = "0.2.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" -license = "GPL-3.0" +license = "MIT/Apache2.0" [dependencies] kvdb = { path = "../kvdb", version = "0.4" } diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs index 28613c4f3..ca82dc8cd 100644 --- a/kvdb-shared-tests/src/lib.rs +++ b/kvdb-shared-tests/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Shared tests for kvdb functionality, to be executed against actual implementations. diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 69d10f44a..f967e1838 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.4.0] - 2019-02-05 - Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index a686f6b26..284418ca5 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" documentation = "https://docs.rs/kvdb-web/" -license = "GPL-3.0" +license = "MIT/Apache2.0" edition = "2018" [dependencies] diff --git a/kvdb-web/src/error.rs b/kvdb-web/src/error.rs index cd3916174..e88d44636 100644 --- a/kvdb-web/src/error.rs +++ b/kvdb-web/src/error.rs @@ -1,18 +1,10 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Errors that can occur when working with IndexedDB. 
diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs index 028b60a0a..4593e3007 100644 --- a/kvdb-web/src/indexed_db.rs +++ b/kvdb-web/src/indexed_db.rs @@ -1,18 +1,10 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Utility functions to interact with IndexedDB browser API. diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index f73426904..5d9878ec2 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. 
If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! A key-value database for use in browsers //! diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index fe5d8f6a3..81a765a05 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -1,18 +1,10 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! IndexedDB tests. diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index e202aaa7a..ccec27833 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.4.0] - 2019-01-06 - Bump parking_lot to 0.10. 
[#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index e6d738877..c955920a9 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.4.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" -license = "GPL-3.0" +license = "MIT/Apache-2.0" edition = "2018" [dependencies] diff --git a/kvdb/src/io_stats.rs b/kvdb/src/io_stats.rs index d0de5ce36..2f10dc707 100644 --- a/kvdb/src/io_stats.rs +++ b/kvdb/src/io_stats.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Generic statistics for key-value databases diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index afda4af87..6382145ce 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Key-Value store abstraction. diff --git a/rlp/license-header b/license-header similarity index 87% rename from rlp/license-header rename to license-header index 03df169c8..67d3f3a1a 100644 --- a/rlp/license-header +++ b/license-header @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/parity-bytes/CHANGELOG.md b/parity-bytes/CHANGELOG.md index cc79bd068..c391629fd 100644 --- a/parity-bytes/CHANGELOG.md +++ b/parity-bytes/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) ## [0.1.1] - 2019-10-24 ### Dependencies diff --git a/parity-bytes/Cargo.toml b/parity-bytes/Cargo.toml index e49adce31..582d6560e 100644 --- a/parity-bytes/Cargo.toml +++ b/parity-bytes/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "byte utilities for Parity" -license = "GPL-3.0" +license = "MIT/Apache2.0" edition = "2018" [dependencies] diff --git a/parity-bytes/src/lib.rs b/parity-bytes/src/lib.rs index 292a6e160..e269776d4 100644 --- a/parity-bytes/src/lib.rs +++ b/parity-bytes/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! General bytes-related utilities. //! diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 591ee42d3..debca1116 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.0] - 2020-02-08 - Remove `inv()` from `SecretKey` (breaking) (https://github.com/paritytech/parity-common/pull/258) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index aa5d7b654..0af20678b 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -4,7 +4,7 @@ version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." -license = "GPL-3.0" +license = "MIT/Apache-2.0" autobenches = false edition = "2018" diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs index 6c13aa369..562c50148 100644 --- a/parity-crypto/benches/bench.rs +++ b/parity-crypto/benches/bench.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
use crate::parity_crypto::publickey::Generator; use criterion::{criterion_group, criterion_main, Bencher, Criterion}; diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs index de643dceb..120e4ef3f 100644 --- a/parity-crypto/src/aes.rs +++ b/parity-crypto/src/aes.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use aes::block_cipher_trait::generic_array::GenericArray; use aes::{Aes128, Aes256}; diff --git a/parity-crypto/src/digest.rs b/parity-crypto/src/digest.rs index caf57dd0c..9ec5edf56 100644 --- a/parity-crypto/src/digest.rs +++ b/parity-crypto/src/digest.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use std::marker::PhantomData; use std::ops::Deref; diff --git a/parity-crypto/src/error.rs b/parity-crypto/src/error.rs index 16d67f504..e1a3cceb0 100644 --- a/parity-crypto/src/error.rs +++ b/parity-crypto/src/error.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
use std::{error::Error as StdError, fmt, result}; diff --git a/parity-crypto/src/hmac/mod.rs b/parity-crypto/src/hmac/mod.rs index 2b082dd45..6d606fdd7 100644 --- a/parity-crypto/src/hmac/mod.rs +++ b/parity-crypto/src/hmac/mod.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use std::marker::PhantomData; use std::ops::Deref; diff --git a/parity-crypto/src/hmac/test.rs b/parity-crypto/src/hmac/test.rs index 1633cf37c..a13174784 100644 --- a/parity-crypto/src/hmac/test.rs +++ b/parity-crypto/src/hmac/test.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use super::*; use hex_literal::hex; diff --git a/parity-crypto/src/lib.rs b/parity-crypto/src/lib.rs index c6bcb5251..c3049716c 100644 --- a/parity-crypto/src/lib.rs +++ b/parity-crypto/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Crypto utils used by ethstore and network. diff --git a/parity-crypto/src/pbkdf2/mod.rs b/parity-crypto/src/pbkdf2/mod.rs index 099e98893..a3a06e867 100644 --- a/parity-crypto/src/pbkdf2/mod.rs +++ b/parity-crypto/src/pbkdf2/mod.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. pub struct Salt<'a>(pub &'a [u8]); pub struct Secret<'a>(pub &'a [u8]); diff --git a/parity-crypto/src/pbkdf2/test.rs b/parity-crypto/src/pbkdf2/test.rs index ff8cc685c..b0bed84ad 100644 --- a/parity-crypto/src/pbkdf2/test.rs +++ b/parity-crypto/src/pbkdf2/test.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. use super::*; diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs index bd8653b79..5ea82a78a 100644 --- a/parity-crypto/src/publickey/ec_math_utils.rs +++ b/parity-crypto/src/publickey/ec_math_utils.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Multiple primitives for work with public and secret keys and with secp256k1 curve points diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs index 8cdaf793a..93a43bed8 100644 --- a/parity-crypto/src/publickey/ecdh.rs +++ b/parity-crypto/src/publickey/ecdh.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! ECDH key agreement scheme implemented as a free function. diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs index ae245cffb..6853b1586 100644 --- a/parity-crypto/src/publickey/ecdsa_signature.rs +++ b/parity-crypto/src/publickey/ecdsa_signature.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! 
Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm diff --git a/parity-crypto/src/publickey/ecies.rs b/parity-crypto/src/publickey/ecies.rs index d7c1354f3..3332f8a94 100644 --- a/parity-crypto/src/publickey/ecies.rs +++ b/parity-crypto/src/publickey/ecies.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Functions for ECIES scheme encryption and decryption diff --git a/parity-crypto/src/publickey/error.rs b/parity-crypto/src/publickey/error.rs index 7f9bfb8e0..5ea8ce391 100644 --- a/parity-crypto/src/publickey/error.rs +++ b/parity-crypto/src/publickey/error.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Module specific errors. diff --git a/parity-crypto/src/publickey/extended_keys.rs b/parity-crypto/src/publickey/extended_keys.rs index 44307fdf2..be682db39 100644 --- a/parity-crypto/src/publickey/extended_keys.rs +++ b/parity-crypto/src/publickey/extended_keys.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Secret, public keys extended with the entropy (aka chain code), that allows further key derivation //! 
Each extended key has 2^31 normal child keys, and 2^31 hardened child keys. diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs index d4fa13b39..f4ac8b287 100644 --- a/parity-crypto/src/publickey/keypair.rs +++ b/parity-crypto/src/publickey/keypair.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Key pair (public + secret) description. diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs index 3afd86a97..9dea21de6 100644 --- a/parity-crypto/src/publickey/keypair_generator.rs +++ b/parity-crypto/src/publickey/keypair_generator.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Random key pair generator. Relies on the secp256k1 C-library to generate random data. diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index 8d487b5ad..294d67b38 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Submodule of crypto utils for working with public key crypto primitives //! 
If you are looking for git history please refer to the `ethkey` crate in the `parity-ethereum` repository. diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs index 68522ca29..ac938a674 100644 --- a/parity-crypto/src/publickey/secret_key.rs +++ b/parity-crypto/src/publickey/secret_key.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Secret key implementation. diff --git a/parity-crypto/src/scrypt.rs b/parity-crypto/src/scrypt.rs index eea1931b3..15b7e14e1 100644 --- a/parity-crypto/src/scrypt.rs +++ b/parity-crypto/src/scrypt.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use super::{KEY_LENGTH, KEY_LENGTH_AES}; use crate::error::ScryptError; diff --git a/parity-path/CHANGELOG.md b/parity-path/CHANGELOG.md index 927c9dc9c..fc0263835 100644 --- a/parity-path/CHANGELOG.md +++ b/parity-path/CHANGELOG.md @@ -5,3 +5,4 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml index a4096fa73..0c783cf6c 100644 --- a/parity-path/Cargo.toml +++ b/parity-path/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Path utilities" -license = "GPL-3.0" +license = "MIT/Apache-2.0" edition = "2018" [dependencies] diff --git a/parity-path/src/lib.rs b/parity-path/src/lib.rs index c286678ba..997a040be 100644 --- a/parity-path/src/lib.rs +++ b/parity-path/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Path utilities use std::path::Path; diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 855ea42fe..9e99d4c0d 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.1] - 2019-02-05 - Add different mode for malloc_size_of_is_0 macro dealing with generics #334. 
[#332](https://github.com/paritytech/parity-common/pull/334) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index d9024ea1c..e0539b23c 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -4,7 +4,7 @@ version = "0.5.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" -license = "GPL-3.0" +license = "MIT/Apache-2.0" edition = "2018" [dependencies] @@ -49,4 +49,4 @@ mimalloc-global = ["mimallocator", "mimalloc-sys"] # implement additional types ethereum-impls = ["ethereum-types", "primitive-types"] # Full estimate: no call to allocator -estimate-heapsize = [] \ No newline at end of file +estimate-heapsize = [] diff --git a/parity-util-mem/derive/Cargo.toml b/parity-util-mem/derive/Cargo.toml index cc208049d..b7648ebd2 100644 --- a/parity-util-mem/derive/Cargo.toml +++ b/parity-util-mem/derive/Cargo.toml @@ -2,7 +2,7 @@ name = "parity-util-mem-derive" version = "0.1.0" authors = ["Parity Technologies "] -license = "MIT" +license = "MIT/Apache2.0" description = "Crate for memory reporting" repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" diff --git a/parity-util-mem/derive/lib.rs b/parity-util-mem/derive/lib.rs index c1c1e504e..2331b0aaa 100644 --- a/parity-util-mem/derive/lib.rs +++ b/parity-util-mem/derive/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! A crate for deriving the MallocSizeOf trait. //! diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index d2953ecfb..e159a32d0 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! default allocator management //! Features are: diff --git a/parity-util-mem/src/ethereum_impls.rs b/parity-util-mem/src/ethereum_impls.rs index 4379b3b0e..c296d2d40 100644 --- a/parity-util-mem/src/ethereum_impls.rs +++ b/parity-util-mem/src/ethereum_impls.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Implementation of `MallocSize` for common ethereum types: fixed hashes //! and uints. diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index cdea52e42..87b47716a 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. //! Crate for parity memory management related utilities. //! It includes global allocator choice, heap measurement and diff --git a/parity-util-mem/src/primitives_impls.rs b/parity-util-mem/src/primitives_impls.rs index ab5953dcc..cf98bc211 100644 --- a/parity-util-mem/src/primitives_impls.rs +++ b/parity-util-mem/src/primitives_impls.rs @@ -1,18 +1,10 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Implementation of `MallocSize` primitive types. diff --git a/parity-util-mem/src/sizeof.rs b/parity-util-mem/src/sizeof.rs index ef63e1000..3d60913e4 100644 --- a/parity-util-mem/src/sizeof.rs +++ b/parity-util-mem/src/sizeof.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Estimation for heapsize calculation. Usable to replace call to allocator method (for some //! allocators or simply because we just need a deterministic cunsumption measurement). diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs index 87f8c9f50..4fb5f7328 100644 --- a/parity-util-mem/tests/derive.rs +++ b/parity-util-mem/tests/derive.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
#![cfg(feature = "std")] diff --git a/plain_hasher/CHANGELOG.md b/plain_hasher/CHANGELOG.md index a4bd19586..c9970dedd 100644 --- a/plain_hasher/CHANGELOG.md +++ b/plain_hasher/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.2.2] - 2019-10-24 - Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/213) diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml index 2a7f94989..57cf4d0c1 100644 --- a/plain_hasher/Cargo.toml +++ b/plain_hasher/Cargo.toml @@ -3,7 +3,7 @@ name = "plain_hasher" description = "Hasher for 32-byte keys." version = "0.2.2" authors = ["Parity Technologies "] -license = "MIT" +license = "MIT/Apache2.0" keywords = ["hash", "hasher"] homepage = "https://github.com/paritytech/parity-common" categories = ["no-std"] diff --git a/plain_hasher/benches/bench.rs b/plain_hasher/benches/bench.rs index 4ba53bb1a..e14d9d7d7 100644 --- a/plain_hasher/benches/bench.rs +++ b/plain_hasher/benches/bench.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
+// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use std::collections::hash_map::DefaultHasher; use std::hash::Hasher; diff --git a/plain_hasher/src/lib.rs b/plain_hasher/src/lib.rs index 3665995d4..4da4a508b 100644 --- a/plain_hasher/src/lib.rs +++ b/plain_hasher/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 3131a001f..e6b53391c 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -2,7 +2,7 @@ name = "primitive-types" version = "0.6.2" authors = ["Parity Technologies "] -license = "Apache-2.0/MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" edition = "2018" diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 12fda74f2..4b39c10af 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -2,7 +2,7 @@ name = "impl-codec" version = "0.4.2" authors = ["Parity Technologies "] -license = "Apache-2.0/MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" description = "Parity Codec serialization support for uint and fixed hash." edition = "2018" diff --git a/primitive-types/impls/codec/src/lib.rs b/primitive-types/impls/codec/src/lib.rs index 9e5714ce0..1a4f2e252 100644 --- a/primitive-types/impls/codec/src/lib.rs +++ b/primitive-types/impls/codec/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2018 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index 62e957c85..85db3c328 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -2,7 +2,7 @@ name = "impl-rlp" version = "0.2.1" authors = ["Parity Technologies "] -license = "Apache-2.0/MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" description = "RLP serialization support for uint and fixed hash." 
edition = "2018" diff --git a/primitive-types/impls/rlp/src/lib.rs b/primitive-types/impls/rlp/src/lib.rs index 16a711370..e542c6e6e 100644 --- a/primitive-types/impls/rlp/src/lib.rs +++ b/primitive-types/impls/rlp/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2018 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index a57ada2a2..641a59e6a 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -3,7 +3,7 @@ name = "impl-serde" version = "0.3.0" authors = ["Parity Technologies "] edition = "2018" -license = "Apache-2.0/MIT" +license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." diff --git a/primitive-types/impls/serde/benches/impl_serde.rs b/primitive-types/impls/serde/benches/impl_serde.rs index d19a97fda..c7a1efea8 100644 --- a/primitive-types/impls/serde/benches/impl_serde.rs +++ b/primitive-types/impls/serde/benches/impl_serde.rs @@ -1,4 +1,4 @@ -// Copyright 2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/primitive-types/impls/serde/benches/input.rs b/primitive-types/impls/serde/benches/input.rs index 00d5efdc5..5673f1f52 100644 --- a/primitive-types/impls/serde/benches/input.rs +++ b/primitive-types/impls/serde/benches/input.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ /// Hexdecimal string 64 chars (32 bytes) pub const HEX_64_CHARS: &str = "\"0x6402541b4e3c2ab65306aec48fce5adedc60e3ac465c3d7036c731e0b2e49209\""; diff --git a/primitive-types/impls/serde/src/lib.rs b/primitive-types/impls/serde/src/lib.rs index 661ff7c0e..500a60cc4 100644 --- a/primitive-types/impls/serde/src/lib.rs +++ b/primitive-types/impls/serde/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 431a56ec7..542ac0dc8 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index 5e3f77ec4..0b4af97c8 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2018 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/benches/rlp.rs b/rlp/benches/rlp.rs index 1fcd8b21f..d1de4c93b 100644 --- a/rlp/benches/rlp.rs +++ b/rlp/benches/rlp.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/error.rs b/rlp/src/error.rs index d810130b0..a965e5626 100644 --- a/rlp/src/error.rs +++ b/rlp/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index 4f30b8a59..c4815019f 100644 --- 
a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index a4c66e2ac..3a913d69f 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 6cdfaa808..319723c06 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 851b845b9..14983d5ef 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/src/traits.rs b/rlp/src/traits.rs index 13531a1b6..b5dfa2764 100644 --- a/rlp/src/traits.rs +++ b/rlp/src/traits.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 6f51b9bd3..ac8e8d951 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/trace-time/CHANGELOG.md b/trace-time/CHANGELOG.md index 0231c53a1..0da35007c 100644 --- a/trace-time/CHANGELOG.md +++ b/trace-time/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.1.2] - 2019-10-24 - Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/232) diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml index f1ec6e9a1..fca5a7619 100644 --- a/trace-time/Cargo.toml +++ b/trace-time/Cargo.toml @@ -4,7 +4,7 @@ description = "Easily trace time to execute a scope." version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" -license = "GPL-3.0" +license = "MIT/Apache2.0" edition = "2018" [dependencies] diff --git a/trace-time/src/lib.rs b/trace-time/src/lib.rs index 65769ee81..e5ecf2d09 100644 --- a/trace-time/src/lib.rs +++ b/trace-time/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! 
Performance timer with logging diff --git a/transaction-pool/CHANGELOG.md b/transaction-pool/CHANGELOG.md index bcd99d5a9..a054f332b 100644 --- a/transaction-pool/CHANGELOG.md +++ b/transaction-pool/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [2.0.2] - 2019-10-24 - Updated to 2018 edition idioms (https://github.com/paritytech/parity-common/pull/237) diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 844607efa..0da12d924 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -2,7 +2,7 @@ description = "Generic transaction pool." name = "transaction-pool" version = "2.0.2" -license = "GPL-3.0" +license = "MIT/Apache2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" edition = "2018" diff --git a/transaction-pool/src/error.rs b/transaction-pool/src/error.rs index 348082f6a..20935c23b 100644 --- a/transaction-pool/src/error.rs +++ b/transaction-pool/src/error.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
+// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use std::{error, fmt, result}; diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs index 66e93fffe..dd49fb3a8 100644 --- a/transaction-pool/src/lib.rs +++ b/transaction-pool/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Generic Transaction Pool //! diff --git a/transaction-pool/src/listener.rs b/transaction-pool/src/listener.rs index 566b318ee..5a3f1a0c7 100644 --- a/transaction-pool/src/listener.rs +++ b/transaction-pool/src/listener.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use crate::error::Error; use std::{ diff --git a/transaction-pool/src/options.rs b/transaction-pool/src/options.rs index 8e1c1002d..947af30a9 100644 --- a/transaction-pool/src/options.rs +++ b/transaction-pool/src/options.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. /// Transaction Pool options. 
#[derive(Clone, Debug, PartialEq)] diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs index 63bb0a07f..2eb324020 100644 --- a/transaction-pool/src/pool.rs +++ b/transaction-pool/src/pool.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use log::{trace, warn}; use std::collections::{hash_map, BTreeSet, HashMap}; diff --git a/transaction-pool/src/ready.rs b/transaction-pool/src/ready.rs index 45e85ca29..009eae273 100644 --- a/transaction-pool/src/ready.rs +++ b/transaction-pool/src/ready.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. /// Transaction readiness. #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/transaction-pool/src/replace.rs b/transaction-pool/src/replace.rs index a278edb80..cbae6319b 100644 --- a/transaction-pool/src/replace.rs +++ b/transaction-pool/src/replace.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! When queue limits are reached, decide whether to replace an existing transaction from the pool diff --git a/transaction-pool/src/scoring.rs b/transaction-pool/src/scoring.rs index 313bd6bc3..b7f75e7fc 100644 --- a/transaction-pool/src/scoring.rs +++ b/transaction-pool/src/scoring.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! A transactions ordering abstraction. diff --git a/transaction-pool/src/status.rs b/transaction-pool/src/status.rs index b9e7656d4..615e40cb7 100644 --- a/transaction-pool/src/status.rs +++ b/transaction-pool/src/status.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. /// Light pool status. /// This status is cheap to compute and can be called frequently. diff --git a/transaction-pool/src/tests/helpers.rs b/transaction-pool/src/tests/helpers.rs index f757ac8d7..8f6e5fb99 100644 --- a/transaction-pool/src/tests/helpers.rs +++ b/transaction-pool/src/tests/helpers.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use std::cmp; use std::collections::HashMap; diff --git a/transaction-pool/src/tests/mod.rs b/transaction-pool/src/tests/mod.rs index db5ea2885..2d80b4a3d 100644 --- a/transaction-pool/src/tests/mod.rs +++ b/transaction-pool/src/tests/mod.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. mod helpers; mod tx_builder; diff --git a/transaction-pool/src/tests/tx_builder.rs b/transaction-pool/src/tests/tx_builder.rs index 83f7b13a0..d543e830e 100644 --- a/transaction-pool/src/tests/tx_builder.rs +++ b/transaction-pool/src/tests/tx_builder.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
use super::{Address, Transaction, H256, U256}; use ethereum_types::BigEndianHash; diff --git a/transaction-pool/src/transactions.rs b/transaction-pool/src/transactions.rs index 5fc963d78..4d6d126af 100644 --- a/transaction-pool/src/transactions.rs +++ b/transaction-pool/src/transactions.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use std::{fmt, mem}; diff --git a/transaction-pool/src/verifier.rs b/transaction-pool/src/verifier.rs index 991b78ebd..d28e5a55e 100644 --- a/transaction-pool/src/verifier.rs +++ b/transaction-pool/src/verifier.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use crate::VerifiedTransaction; diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md index 320e4f84f..071c96204 100644 --- a/triehash/CHANGELOG.md +++ b/triehash/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.8.2] - 2019-12-15 - Added no-std support (https://github.com/paritytech/parity-common/pull/280) diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index a941edaa0..407489469 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -4,7 +4,7 @@ version = "0.8.2" authors = ["Parity Technologies "] description = "In-memory patricia trie operations" repository = "https://github.com/paritytech/parity-common" -license = "GPL-3.0" +license = "MIT/Apache2.0" edition = "2018" [dependencies] diff --git a/triehash/benches/triehash.rs b/triehash/benches/triehash.rs index 684484265..14ce0dd6b 100644 --- a/triehash/benches/triehash.rs +++ b/triehash/benches/triehash.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. use criterion::{criterion_group, criterion_main, Criterion}; use ethereum_types::H256; diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs index 81a005826..a60a24998 100644 --- a/triehash/src/lib.rs +++ b/triehash/src/lib.rs @@ -1,18 +1,10 @@ -// Copyright 2015-2019 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. //! Generetes trie root. //! 
diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index ea0284cdd..ec36c576a 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/uint/examples/modular.rs b/uint/examples/modular.rs index abc754f82..30b236992 100644 --- a/uint/examples/modular.rs +++ b/uint/examples/modular.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/uint/fuzz/fuzz_targets/div_mod.rs b/uint/fuzz/fuzz_targets/div_mod.rs index 7bcf751a8..102407ecc 100644 --- a/uint/fuzz/fuzz_targets/div_mod.rs +++ b/uint/fuzz/fuzz_targets/div_mod.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + #![no_main] use libfuzzer_sys::fuzz_target; diff --git a/uint/fuzz/fuzz_targets/div_mod_word.rs b/uint/fuzz/fuzz_targets/div_mod_word.rs index 890774c08..285304944 100644 --- a/uint/fuzz/fuzz_targets/div_mod_word.rs +++ b/uint/fuzz/fuzz_targets/div_mod_word.rs @@ -1,3 +1,11 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ #![no_main] use libfuzzer_sys::fuzz_target; diff --git a/uint/src/lib.rs b/uint/src/lib.rs index bba720be8..144c53e32 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 7dd1bca42..ac0ffc85e 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index ddeb747c3..2e226ad36 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2019 Parity Technologies +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license From e5dd7d46155664f49b46ab865ac3427d27232d56 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 13 Feb 2020 14:00:06 +0100 Subject: [PATCH 092/359] prepare rlp-derive release (#344) * rlp-derive: unify license headers and prepare for publishing * unify changelog formats * parity-runtime: was published recently --- ethbloom/CHANGELOG.md | 2 +- ethereum-types/CHANGELOG.md | 4 ++-- fixed-hash/CHANGELOG.md | 4 ++-- keccak-hash/CHANGELOG.md | 4 ++-- kvdb-memorydb/CHANGELOG.md | 1 - kvdb-rocksdb/CHANGELOG.md | 22 +++++++++++----------- kvdb-shared-tests/CHANGELOG.md | 7 +++++++ kvdb-web/CHANGELOG.md | 2 +- kvdb/CHANGELOG.md | 6 +++--- parity-bytes/CHANGELOG.md | 6 +++--- parity-crypto/CHANGELOG.md | 16 ++++++++-------- parity-util-mem/CHANGELOG.md | 6 +++--- parity-util-mem/derive/CHANGELOG.md | 7 +++++++ plain_hasher/CHANGELOG.md | 6 +++--- primitive-types/CHANGELOG.md | 2 +- primitive-types/impls/serde/CHANGELOG.md | 2 +- rlp-derive/CHANGELOG.md | 2 +- rlp-derive/Cargo.toml | 2 +- rlp-derive/src/de.rs | 2 
+- rlp-derive/src/en.rs | 2 +- rlp-derive/src/lib.rs | 2 +- rlp-derive/tests/rlp.rs | 2 +- rlp/CHANGELOG.md | 10 +++++----- runtime/CHANGELOG.md | 4 ++-- trace-time/CHANGELOG.md | 6 +++--- transaction-pool/CHANGELOG.md | 6 +++--- triehash/CHANGELOG.md | 8 ++++---- uint/CHANGELOG.md | 10 +++++----- 28 files changed, 83 insertions(+), 70 deletions(-) create mode 100644 kvdb-shared-tests/CHANGELOG.md create mode 100644 parity-util-mem/derive/CHANGELOG.md diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 677e06714..6c13f6f0a 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -10,4 +10,4 @@ The format is based on [Keep a Changelog]. ## [0.8.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 1e06ff2b0..6b214e75b 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,4 +8,4 @@ The format is based on [Keep a Changelog]. - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ### Added -- uint error type is re-exported (https://github.com/paritytech/parity-common/pull/244) +- Uint error type is re-exported. [#244](https://github.com/paritytech/parity-common/pull/244) diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index cfea43299..f486d7847 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -10,8 +10,8 @@ The format is based on [Keep a Changelog]. 
## [0.5.2] - 2019-12-19 ### Fixed -- re-export `alloc` for both std and no-std to fix compilation (See [PR #268](https://github.com/paritytech/parity-common/pull/268)) +- Re-export `alloc` for both std and no-std to fix compilation. [#268](https://github.com/paritytech/parity-common/pull/268) ## [0.5.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 4b6c6795a..429e3bc13 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -9,4 +9,4 @@ The format is based on [Keep a Changelog]. ## [0.4.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index cb6163d43..3da9b9203 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -19,7 +19,6 @@ The format is based on [Keep a Changelog]. ## [0.2.0] - 2019-12-19 ### Fixed - `iter_from_prefix` behaviour synced with the `kvdb-rocksdb` - ### Changed - Default column support removed from the API - Column argument type changed from `Option` to `u32` diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index ac4c2d558..dbebb5087 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog]. - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.0] - 2019-02-05 -- Bump parking_lot to 0.10. 
[#332](https://github.com/paritytech/parity-common/pull/332 +- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) ## [0.4.2] - 2019-02-04 ### Fixes @@ -22,10 +22,10 @@ The format is based on [Keep a Changelog]. - Support querying memory footprint via `MallocSizeOf` trait. [#292](https://github.com/paritytech/parity-common/pull/292) ## [0.3.0] - 2019-12-19 -- Use `get_pinned` API to save one allocation for each call to `get()` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) -- Rename `drop_column` to `remove_last_column` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) -- Rename `get_cf` to `cf` (See [PR #274](https://github.com/paritytech/parity-common/pull/274) for details) -- Default column support removed from the API (See [PR #278](https://github.com/paritytech/parity-common/pull/278) for details) +- Use `get_pinned` API to save one allocation for each call to `get()`. [#274](https://github.com/paritytech/parity-common/pull/274) +- Rename `drop_column` to `remove_last_column`. [#274](https://github.com/paritytech/parity-common/pull/274) +- Rename `get_cf` to `cf`. [#274](https://github.com/paritytech/parity-common/pull/274) +- Default column support removed from the API. [#278](https://github.com/paritytech/parity-common/pull/278) - Column argument type changed from `Option` to `u32` - Migration - Column index `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. @@ -33,16 +33,16 @@ The format is based on [Keep a Changelog]. - `DatabaseConfig::default()` defaults to 1 column - `Database::with_columns` still accepts `u32`, but panics if `0` is provided - `Database::open` panics if configuration with 0 columns is provided -- Add `num_keys(col)` to get an estimate of the number of keys in a column (See [PR #285](https://github.com/paritytech/parity-common/pull/285)). 
-- Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) +- Add `num_keys(col)` to get an estimate of the number of keys in a column. [#285](https://github.com/paritytech/parity-common/pull/285) +- Remove `ElasticArray` and use the new `DBValue` (alias for `Vec`) and `DBKey` types from `kvdb`. [#282](https://github.com/paritytech/parity-common/pull/282) ## [0.2.0] - 2019-11-28 -- Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb) (see [PR #257](https://github.com/paritytech/parity-common/pull/257) for details) -- Revamped configuration handling, allowing per-column memory budgeting (see [PR #256](https://github.com/paritytech/parity-common/pull/256) for details) +- Switched away from using [parity-rocksdb](https://crates.io/crates/parity-rocksdb) in favour of upstream [rust-rocksdb](https://crates.io/crates/rocksdb). [#257](https://github.com/paritytech/parity-common/pull/257) +- Revamped configuration handling, allowing per-column memory budgeting. [#256](https://github.com/paritytech/parity-common/pull/256) ### Dependencies - rust-rocksdb v0.13 ## [0.1.6] - 2019-10-24 -- Updated to 2018 edition idioms (https://github.com/paritytech/parity-common/pull/237) +- Updated to 2018 edition idioms. [#237](https://github.com/paritytech/parity-common/pull/237) ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md new file mode 100644 index 000000000..545cf7dff --- /dev/null +++ b/kvdb-shared-tests/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index f967e1838..0d4a23ad9 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -24,4 +24,4 @@ The format is based on [Keep a Changelog]. ## [0.1.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index ccec27833..c7d38231b 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -22,10 +22,10 @@ The format is based on [Keep a Changelog]. - Default column support removed from the API - Column argument type changed from `Option` to `u32` - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. -- Remove `ElasticArray` and change `DBValue` to be a type alias for `Vec` and add a `DBKey` backed by a `SmallVec`. (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) +- Remove `ElasticArray` and change `DBValue` to be a type alias for `Vec` and add a `DBKey` backed by a `SmallVec`. [#282](https://github.com/paritytech/parity-common/pull/282) ## [0.1.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) ### Changed -- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/205) +- Migrated to 2018 edition. [#205](https://github.com/paritytech/parity-common/pull/205) diff --git a/parity-bytes/CHANGELOG.md b/parity-bytes/CHANGELOG.md index c391629fd..ef8a2633d 100644 --- a/parity-bytes/CHANGELOG.md +++ b/parity-bytes/CHANGELOG.md @@ -9,6 +9,6 @@ The format is based on [Keep a Changelog]. 
## [0.1.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) -### Added -- Added no-std support (https://github.com/paritytech/parity-common/pull/154) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) +### Added +- Added no-std support. [#154](https://github.com/paritytech/parity-common/pull/154) diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index debca1116..c955084f9 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,10 +8,10 @@ The format is based on [Keep a Changelog]. - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.0] - 2020-02-08 -- Remove `inv()` from `SecretKey` (breaking) (https://github.com/paritytech/parity-common/pull/258) -- `Generate::generate()` does not return error (https://github.com/paritytech/parity-common/pull/258) -- `Secp256k1` is no longer exported (https://github.com/paritytech/parity-common/pull/258) -- Remove `public_is_valid()` as it is now impossible to create invalid public keys (https://github.com/paritytech/parity-common/pull/258) -- 0-valued `Secp::Message`s are disallowed (signatures on them are forgeable for all keys) (https://github.com/paritytech/parity-common/pull/258) -- Switch to upstream `rust-secp256k1` at v0.17.2 (https://github.com/paritytech/parity-common/pull/258) -- make `rustc_hex` dependency optional (https://github.com/paritytech/parity-common/pull/337) +- Remove `inv()` from `SecretKey` (breaking). [#258](https://github.com/paritytech/parity-common/pull/258) +- `Generate::generate()` does not return error. [#258](https://github.com/paritytech/parity-common/pull/258) +- `Secp256k1` is no longer exported. 
[#258](https://github.com/paritytech/parity-common/pull/258) +- Remove `public_is_valid()` as it is now impossible to create invalid public keys. [#258](https://github.com/paritytech/parity-common/pull/258) +- 0-valued `Secp::Message`s are disallowed (signatures on them are forgeable for all keys). [#258](https://github.com/paritytech/parity-common/pull/258) +- Switch to upstream `rust-secp256k1` at v0.17.2. [#258](https://github.com/paritytech/parity-common/pull/258) +- make `rustc_hex` dependency optional. [#337](https://github.com/paritytech/parity-common/pull/337) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 9e99d4c0d..dbab86b6e 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -8,7 +8,7 @@ The format is based on [Keep a Changelog]. - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.5.1] - 2019-02-05 -- Add different mode for malloc_size_of_is_0 macro dealing with generics #334. [#332](https://github.com/paritytech/parity-common/pull/334) +- Add different mode for malloc_size_of_is_0 macro dealing with generics. [#334](https://github.com/paritytech/parity-common/pull/334) ## [0.5.0] - 2019-02-05 - Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) @@ -27,8 +27,8 @@ The format is based on [Keep a Changelog]. - Added default implementation of `MallocSizeOf` for tuples up to 12. [#300](https://github.com/paritytech/parity-common/pull/300) ## [0.3.0] - 2019-12-19 -- Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). (See [PR #282](https://github.com/paritytech/parity-common/pull/282/files)) +- Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). 
[#282](https://github.com/paritytech/parity-common/pull/282) ## [0.2.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/parity-util-mem/derive/CHANGELOG.md b/parity-util-mem/derive/CHANGELOG.md new file mode 100644 index 000000000..545cf7dff --- /dev/null +++ b/parity-util-mem/derive/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. + +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/plain_hasher/CHANGELOG.md b/plain_hasher/CHANGELOG.md index c9970dedd..afc302090 100644 --- a/plain_hasher/CHANGELOG.md +++ b/plain_hasher/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,6 +8,6 @@ The format is based on [Keep a Changelog]. - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.2.2] - 2019-10-24 -- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/213) +- Migrated to 2018 edition. [#213](https://github.com/paritytech/parity-common/pull/213) ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 87ee03e6f..0689eab5a 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -12,4 +12,4 @@ The format is based on [Keep a Changelog]. ## [0.6.1] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. 
[#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index a63cfb2f1..e58aeb12c 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -8,4 +8,4 @@ The format is based on [Keep a Changelog]. ## [0.2.3] - 2019-10-29 ### Fixed -- Fixed a bug in empty slice serialization (https://github.com/paritytech/parity-common/pull/253) +- Fixed a bug in empty slice serialization. [#253](https://github.com/paritytech/parity-common/pull/253) diff --git a/rlp-derive/CHANGELOG.md b/rlp-derive/CHANGELOG.md index 592d3fbf9..d7b344b76 100644 --- a/rlp-derive/CHANGELOG.md +++ b/rlp-derive/CHANGELOG.md @@ -6,5 +6,5 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.1.0] - 2020-02-12 +## [0.1.0] - 2020-02-13 - Extracted from parity-ethereum repo. [#343](https://github.com/paritytech/parity-common/pull/343) diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index bf4d0eaf0..de3824a2f 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -16,4 +16,4 @@ quote = "1.0.2" proc-macro2 = "1.0.8" [dev-dependencies] -rlp = "0.4.0" +rlp = "0.4.4" diff --git a/rlp-derive/src/de.rs b/rlp-derive/src/de.rs index d1b4e4ca5..490c1a7f4 100644 --- a/rlp-derive/src/de.rs +++ b/rlp-derive/src/de.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp-derive/src/en.rs b/rlp-derive/src/en.rs index 9eb0d6afb..631862429 100644 --- a/rlp-derive/src/en.rs +++ b/rlp-derive/src/en.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. 
+// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp-derive/src/lib.rs b/rlp-derive/src/lib.rs index 47efd2ffe..6ec9cb510 100644 --- a/rlp-derive/src/lib.rs +++ b/rlp-derive/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp-derive/tests/rlp.rs b/rlp-derive/tests/rlp.rs index e3cda4dbc..fd564dc7d 100644 --- a/rlp-derive/tests/rlp.rs +++ b/rlp-derive/tests/rlp.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. +// Copyright 2020 Parity Technologies // // Licensed under the Apache License, Version 2.0 or the MIT license diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index e0a32ca9b..c817dc6b9 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,12 +8,12 @@ The format is based on [Keep a Changelog]. ## [0.4.4] - 2019-11-20 ### Added -- Method `Rlp::at_with_offset` (https://github.com/paritytech/parity-common/pull/269) +- Method `Rlp::at_with_offset`. [#269](https://github.com/paritytech/parity-common/pull/269) ## [0.4.3] - 2019-10-24 ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) ### Fixed -- Fixed nested unbounded lists (https://github.com/paritytech/parity-common/pull/203) +- Fixed nested unbounded lists. [#203](https://github.com/paritytech/parity-common/pull/203) ### Added -- Added no-std support (https://github.com/paritytech/parity-common/pull/206) +- Added no-std support. 
[#206](https://github.com/paritytech/parity-common/pull/206) diff --git a/runtime/CHANGELOG.md b/runtime/CHANGELOG.md index 59d2c7ffe..e73fb6f28 100644 --- a/runtime/CHANGELOG.md +++ b/runtime/CHANGELOG.md @@ -6,6 +6,6 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.1.1] - 2019-11-25 +## [0.1.1] - 2020-02-11 ### Changed -- Moved to parity common repo, prepared for publishing (https://github.com/paritytech/parity-common/pull/271) +- Moved to parity common repo, prepared for publishing. [#271](https://github.com/paritytech/parity-common/pull/271) diff --git a/trace-time/CHANGELOG.md b/trace-time/CHANGELOG.md index 0da35007c..300ce6461 100644 --- a/trace-time/CHANGELOG.md +++ b/trace-time/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,6 +8,6 @@ The format is based on [Keep a Changelog]. - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.1.2] - 2019-10-24 -- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/232) +- Migrated to 2018 edition. [#232](https://github.com/paritytech/parity-common/pull/232) ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/transaction-pool/CHANGELOG.md b/transaction-pool/CHANGELOG.md index a054f332b..431effd94 100644 --- a/transaction-pool/CHANGELOG.md +++ b/transaction-pool/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,6 +8,6 @@ The format is based on [Keep a Changelog]. - License changed from GPL3 to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) ## [2.0.2] - 2019-10-24 -- Updated to 2018 edition idioms (https://github.com/paritytech/parity-common/pull/237) +- Updated to 2018 edition idioms. [#237](https://github.com/paritytech/parity-common/pull/237) ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md index 071c96204..88acd4dd1 100644 --- a/triehash/CHANGELOG.md +++ b/triehash/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,8 +8,8 @@ The format is based on [Keep a Changelog]. - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.8.2] - 2019-12-15 -- Added no-std support (https://github.com/paritytech/parity-common/pull/280) +- Added no-std support. [#280](https://github.com/paritytech/parity-common/pull/280) ## [0.8.1] - 2019-10-24 -- Migrated to 2018 edition (https://github.com/paritytech/parity-common/pull/214) +- Migrated to 2018 edition. [#214](https://github.com/paritytech/parity-common/pull/214) ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index a4214eaf9..c04300e84 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ @@ -8,9 +8,9 @@ The format is based on [Keep a Changelog]. 
## [0.8.2] - 2019-10-24 ### Fixed -- Fixed 2018 edition imports (https://github.com/paritytech/parity-common/pull/237) -- Removed `uninitialized` usage (https://github.com/paritytech/parity-common/pull/238) +- Fixed 2018 edition imports. [#237](https://github.com/paritytech/parity-common/pull/237) +- Removed `uninitialized` usage. [#238](https://github.com/paritytech/parity-common/pull/238) ### Dependencies -- Updated dependencies (https://github.com/paritytech/parity-common/pull/239) +- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) ### Changed -- Modified AsRef impl (https://github.com/paritytech/parity-common/pull/196) +- Modified AsRef impl. [#196](https://github.com/paritytech/parity-common/pull/196) From 3ca30bf82d08c972d4affd78055435351cdde2ef Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Fri, 21 Feb 2020 13:05:50 +0300 Subject: [PATCH 093/359] Fix clippy lints for rlp-derive (#345) --- rlp-derive/src/de.rs | 74 ++++++++++++++++++++--------------------- rlp-derive/src/en.rs | 67 +++++++++++++++++++------------------ rlp-derive/src/lib.rs | 2 ++ rlp-derive/tests/rlp.rs | 40 +++++++++++----------- 4 files changed, 93 insertions(+), 90 deletions(-) diff --git a/rlp-derive/src/de.rs b/rlp-derive/src/de.rs index 490c1a7f4..730c39270 100644 --- a/rlp-derive/src/de.rs +++ b/rlp-derive/src/de.rs @@ -24,9 +24,10 @@ fn decodable_wrapper_parse_quotes() -> ParseQuotes { } pub fn impl_decodable(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpDecodable)] is only defined for structs."), + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpDecodable)] is only defined for structs."); }; let mut default_attribute_encountered = false; @@ -59,9 +60,10 @@ pub fn impl_decodable(ast: &syn::DeriveInput) -> TokenStream { } pub fn impl_decodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data 
{ - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpDecodableWrapper)] is only defined for structs."), + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpDecodableWrapper)] is only defined for structs."); }; let stmt = { @@ -98,20 +100,21 @@ pub fn impl_decodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { } fn decodable_field( - index: usize, + mut index: usize, field: &syn::Field, quotes: ParseQuotes, default_attribute_encountered: &mut bool, ) -> TokenStream { - let id = match field.ident { - Some(ref ident) => quote! { #ident }, - None => { - let index: syn::Index = index.into(); - quote! { #index } - } + let id = if let Some(ident) = &field.ident { + quote! { #ident } + } else { + let index = syn::Index::from(index); + quote! { #index } }; - let index = index - *default_attribute_encountered as usize; + if *default_attribute_encountered { + index -= 1; + } let index = quote! { #index }; let single = quotes.single; @@ -123,7 +126,7 @@ fn decodable_field( panic!("only 1 #[rlp(default)] attribute is allowed in a struct") } match attr.parse_args() { - Ok(proc_macro2::TokenTree::Ident(ident)) if ident.to_string() == "default" => {} + Ok(proc_macro2::TokenTree::Ident(ident)) if ident == "default" => {} _ => panic!("only #[rlp(default)] attribute is supported"), } *default_attribute_encountered = true; @@ -132,32 +135,29 @@ fn decodable_field( false }; - match field.ty { - syn::Type::Path(ref path) => { - let ident = &path.path.segments.first().expect("there must be at least 1 segment").ident; - let ident_type = ident.to_string(); - if &ident_type == "Vec" { - if quotes.takes_index { - if default { - quote! { #id: #list(#index).unwrap_or_default(), } - } else { - quote! 
{ #id: #list(#index)?, } - } + if let syn::Type::Path(path) = &field.ty { + let ident = &path.path.segments.first().expect("there must be at least 1 segment").ident; + let ident_type = ident.to_string(); + if ident_type == "Vec" { + if quotes.takes_index { + if default { + quote! { #id: #list(#index).unwrap_or_default(), } } else { - quote! { #id: #list()?, } + quote! { #id: #list(#index)?, } } } else { - if quotes.takes_index { - if default { - quote! { #id: #single(#index).unwrap_or_default(), } - } else { - quote! { #id: #single(#index)?, } - } - } else { - quote! { #id: #single()?, } - } + quote! { #id: #list()?, } + } + } else if quotes.takes_index { + if default { + quote! { #id: #single(#index).unwrap_or_default(), } + } else { + quote! { #id: #single(#index)?, } } + } else { + quote! { #id: #single()?, } } - _ => panic!("rlp_derive not supported"), + } else { + panic!("rlp_derive not supported"); } } diff --git a/rlp-derive/src/en.rs b/rlp-derive/src/en.rs index 631862429..9c21bebd2 100644 --- a/rlp-derive/src/en.rs +++ b/rlp-derive/src/en.rs @@ -10,9 +10,10 @@ use proc_macro2::TokenStream; use quote::quote; pub fn impl_encodable(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpEncodable)] is only defined for structs."), + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpEncodable)] is only defined for structs."); }; let stmts: Vec<_> = body.fields.iter().enumerate().map(|(i, field)| encodable_field(i, field)).collect(); @@ -38,9 +39,10 @@ pub fn impl_encodable(ast: &syn::DeriveInput) -> TokenStream { } pub fn impl_encodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { - let body = match ast.data { - syn::Data::Struct(ref s) => s, - _ => panic!("#[derive(RlpEncodableWrapper)] is only defined for structs."), + let body = if let syn::Data::Struct(s) = &ast.data { + s + } else { + panic!("#[derive(RlpEncodableWrapper)] is only defined 
for structs."); }; let stmt = { @@ -72,38 +74,37 @@ pub fn impl_encodable_wrapper(ast: &syn::DeriveInput) -> TokenStream { } fn encodable_field(index: usize, field: &syn::Field) -> TokenStream { - let ident = match field.ident { - Some(ref ident) => quote! { #ident }, - None => { - let index: syn::Index = index.into(); - quote! { #index } - } + let ident = if let Some(ident) = &field.ident { + quote! { #ident } + } else { + let index = syn::Index::from(index); + quote! { #index } }; let id = quote! { self.#ident }; - match field.ty { - syn::Type::Path(ref path) => { - let top_segment = path.path.segments.first().expect("there must be at least 1 segment"); - let ident = &top_segment.ident; - if &ident.to_string() == "Vec" { - let inner_ident = match top_segment.arguments { - syn::PathArguments::AngleBracketed(ref angle) => { - let ty = angle.args.first().expect("Vec has only one angle bracketed type; qed"); - match *ty { - syn::GenericArgument::Type(syn::Type::Path(ref path)) => { - &path.path.segments.first().expect("there must be at least 1 segment").ident - } - _ => panic!("rlp_derive not supported"), - } + if let syn::Type::Path(path) = &field.ty { + let top_segment = path.path.segments.first().expect("there must be at least 1 segment"); + let ident = &top_segment.ident; + if ident == "Vec" { + let inner_ident = { + if let syn::PathArguments::AngleBracketed(angle) = &top_segment.arguments { + if let syn::GenericArgument::Type(syn::Type::Path(path)) = + angle.args.first().expect("Vec has only one angle bracketed type; qed") + { + &path.path.segments.first().expect("there must be at least 1 segment").ident + } else { + panic!("rlp_derive not supported"); } - _ => unreachable!("Vec has only one angle bracketed type; qed"), - }; - quote! { stream.append_list::<#inner_ident, _>(&#id); } - } else { - quote! { stream.append(&#id); } - } + } else { + unreachable!("Vec has only one angle bracketed type; qed") + } + }; + quote! 
{ stream.append_list::<#inner_ident, _>(&#id); } + } else { + quote! { stream.append(&#id); } } - _ => panic!("rlp_derive not supported"), + } else { + panic!("rlp_derive not supported"); } } diff --git a/rlp-derive/src/lib.rs b/rlp-derive/src/lib.rs index 6ec9cb510..cf6edb4f4 100644 --- a/rlp-derive/src/lib.rs +++ b/rlp-derive/src/lib.rs @@ -16,6 +16,8 @@ //! the field deserialization fails, as we don't serialize field //! names and there is no way to tell if it is present or not. +#![warn(clippy::all, clippy::pedantic, clippy::nursery)] + extern crate proc_macro; mod de; diff --git a/rlp-derive/tests/rlp.rs b/rlp-derive/tests/rlp.rs index fd564dc7d..24963d323 100644 --- a/rlp-derive/tests/rlp.rs +++ b/rlp-derive/tests/rlp.rs @@ -10,62 +10,62 @@ use rlp::{decode, encode}; use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; #[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)] -struct Foo { +struct Item { a: String, } #[derive(Debug, PartialEq, RlpEncodableWrapper, RlpDecodableWrapper)] -struct FooWrapper { +struct ItemWrapper { a: String, } #[test] -fn test_encode_foo() { - let foo = Foo { a: "cat".into() }; +fn test_encode_item() { + let item = Item { a: "cat".into() }; let expected = vec![0xc4, 0x83, b'c', b'a', b't']; - let out = encode(&foo); + let out = encode(&item); assert_eq!(out, expected); let decoded = decode(&expected).expect("decode failure"); - assert_eq!(foo, decoded); + assert_eq!(item, decoded); } #[test] -fn test_encode_foo_wrapper() { - let foo = FooWrapper { a: "cat".into() }; +fn test_encode_item_wrapper() { + let item = ItemWrapper { a: "cat".into() }; let expected = vec![0x83, b'c', b'a', b't']; - let out = encode(&foo); + let out = encode(&item); assert_eq!(out, expected); let decoded = decode(&expected).expect("decode failure"); - assert_eq!(foo, decoded); + assert_eq!(item, decoded); } #[test] -fn test_encode_foo_default() { +fn test_encode_item_default() { #[derive(Debug, PartialEq, RlpEncodable, 
RlpDecodable)] - struct FooDefault { + struct ItemDefault { a: String, /// It works with other attributes. #[rlp(default)] b: Option>, } - let attack_of = String::from("clones"); - let foo = Foo { a: attack_of.clone() }; + let attack_of = "clones"; + let item = Item { a: attack_of.into() }; let expected = vec![0xc7, 0x86, b'c', b'l', b'o', b'n', b'e', b's']; - let out = encode(&foo); + let out = encode(&item); assert_eq!(out, expected); - let foo_default = FooDefault { a: attack_of.clone(), b: None }; + let item_default = ItemDefault { a: attack_of.into(), b: None }; let decoded = decode(&expected).expect("default failure"); - assert_eq!(foo_default, decoded); + assert_eq!(item_default, decoded); - let foo_some = FooDefault { a: attack_of.clone(), b: Some(vec![1, 2, 3]) }; - let out = encode(&foo_some); - assert_eq!(decode(&out), Ok(foo_some)); + let item_some = ItemDefault { a: attack_of.into(), b: Some(vec![1, 2, 3]) }; + let out = encode(&item_some); + assert_eq!(decode(&out), Ok(item_some)); } From 05d7da889cb19b19f6a14c0c100341044695a72b Mon Sep 17 00:00:00 2001 From: Valentin Kettner Date: Fri, 21 Feb 2020 17:40:28 +0100 Subject: [PATCH 094/359] Implement Error for FromDecStrErr (#346) * Implement Error for FromDecStrErr * Only implement Error with std feature --- uint/src/uint.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index ac0ffc85e..493e2e21e 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -38,6 +38,23 @@ pub enum FromDecStrErr { InvalidLength, } +#[cfg(feature = "std")] +impl std::fmt::Display for FromDecStrErr { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + FromDecStrErr::InvalidCharacter => "a character is not in the range 0-9", + FromDecStrErr::InvalidLength => "the number is too large for the type", + } + ) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for FromDecStrErr {} + #[macro_export] 
#[doc(hidden)] macro_rules! impl_map_from { From 68dadf26c3b62a805c3e2a4b51f1de3880d24de5 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 27 Feb 2020 20:23:32 +0100 Subject: [PATCH 095/359] kvdb-rocksdb: expose RocksDB stats (#347) * kvdb-rocksdb: get_statistics method * kvdb-rocksdb: parse statistics string into a rust type * kvdb-rocksdb: integrate rocksdb block cache hit with kvdb::IoStats * kvdb-rocksdb: fmt * kvdb-rocksdb: add a test for stats parser * kvdb-rocksdb: hide rocksdb stats behind a config flag * kvdb-rocksdb: update changelog * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: Nikolay Volf Co-authored-by: Nikolay Volf --- kvdb-rocksdb/CHANGELOG.md | 1 + kvdb-rocksdb/src/lib.rs | 57 ++++++++++++++++++++++++++++++-- kvdb-rocksdb/src/stats.rs | 68 ++++++++++++++++++++++++++++++++++++--- 3 files changed, 118 insertions(+), 8 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index dbebb5087..da022f21a 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Added `get_statistics` method and `enable_statistics` config parameter. [#347](https://github.com/paritytech/parity-common/pull/347) ## [0.5.0] - 2019-02-05 - Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 42ee1388c..ac8958919 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -167,6 +167,12 @@ pub struct DatabaseConfig { pub columns: u32, /// Specify the maximum number of info/debug log files to be kept. pub keep_log_file_num: i32, + /// Enable native RocksDB statistics. + /// Disabled by default. + /// + /// It can have a negative performance impact up to 10% according to + /// https://github.com/facebook/rocksdb/wiki/Statistics. 
+ pub enable_statistics: bool, } impl DatabaseConfig { @@ -215,6 +221,7 @@ impl Default for DatabaseConfig { compaction: CompactionProfile::default(), columns: 1, keep_log_file_num: 1, + enable_statistics: false, } } } @@ -267,6 +274,8 @@ pub struct Database { config: DatabaseConfig, path: String, #[ignore_malloc_size_of = "insignificant"] + opts: Options, + #[ignore_malloc_size_of = "insignificant"] write_opts: WriteOptions, #[ignore_malloc_size_of = "insignificant"] read_opts: ReadOptions, @@ -304,6 +313,10 @@ fn is_corrupted(err: &Error) -> bool { fn generate_options(config: &DatabaseConfig) -> Options { let mut opts = Options::default(); + opts.set_report_bg_io_stats(true); + if config.enable_statistics { + opts.enable_statistics(); + } opts.set_use_fsync(false); opts.create_if_missing(true); opts.set_max_open_files(config.max_open_files); @@ -409,6 +422,7 @@ impl Database { flushing: RwLock::new((0..config.columns).map(|_| HashMap::new()).collect()), flushing_lock: Mutex::new(false), path: path.to_owned(), + opts, read_opts, write_opts, block_opts, @@ -715,6 +729,15 @@ impl Database { None => Ok(()), } } + + /// Get RocksDB statistics. 
+ pub fn get_statistics(&self) -> HashMap { + if let Some(stats) = self.opts.get_statistics() { + stats::parse_rocksdb_stats(&stats) + } else { + HashMap::new() + } + } } // duplicate declaration of methods here to avoid trait import in certain existing cases @@ -755,6 +778,13 @@ impl KeyValueDB for Database { } fn io_stats(&self, kind: kvdb::IoStatsKind) -> kvdb::IoStats { + let rocksdb_stats = self.get_statistics(); + let cache_hit_count = rocksdb_stats.get("block.cache.hit").map(|s| s.count).unwrap_or(0u64); + let overall_stats = self.stats.overall(); + let old_cache_hit_count = overall_stats.raw.cache_hit_count; + + self.stats.tally_cache_hit_count(cache_hit_count - old_cache_hit_count); + let taken_stats = match kind { kvdb::IoStatsKind::Overall => self.stats.overall(), kvdb::IoStatsKind::SincePrevious => self.stats.since_previous(), @@ -767,7 +797,7 @@ impl KeyValueDB for Database { stats.transactions = taken_stats.raw.transactions; stats.bytes_written = taken_stats.raw.bytes_written; stats.bytes_read = taken_stats.raw.bytes_read; - + stats.cache_reads = taken_stats.raw.cache_hit_count; stats.started = taken_stats.started; stats.span = taken_stats.started.elapsed(); @@ -847,6 +877,7 @@ mod tests { compaction: CompactionProfile::default(), columns: 11, keep_log_file_num: 1, + enable_statistics: false, }; let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); @@ -984,20 +1015,40 @@ mod tests { assert_eq!(c.memory_budget(), 45 * MB, "total budget is the sum of the column budget"); } + #[test] + fn test_stats_parser() { + let raw = r#"rocksdb.row.cache.hit COUNT : 1 +rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.000000 COUNT : 0 SUM : 15 +"#; + let stats = stats::parse_rocksdb_stats(raw); + assert_eq!(stats["row.cache.hit"].count, 1); + assert!(stats["row.cache.hit"].times.is_none()); + assert_eq!(stats["db.get.micros"].count, 0); + let get_times = stats["db.get.micros"].times.unwrap(); + 
assert_eq!(get_times.sum, 15); + assert_eq!(get_times.p50, 2.0); + assert_eq!(get_times.p95, 3.0); + assert_eq!(get_times.p99, 4.0); + assert_eq!(get_times.p100, 5.0); + } + #[test] fn rocksdb_settings() { const NUM_COLS: usize = 2; - let mut cfg = DatabaseConfig::with_columns(NUM_COLS as u32); + let mut cfg = DatabaseConfig { enable_statistics: true, ..DatabaseConfig::with_columns(NUM_COLS as u32) }; cfg.max_open_files = 123; // is capped by the OS fd limit (typically 1024) cfg.compaction.block_size = 323232; cfg.compaction.initial_file_size = 102030; cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().collect(); let db_path = TempDir::new("config_test").expect("the OS can create tmp dirs"); - let _db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); + let db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) .expect("rocksdb creates a LOG file"); let mut settings = String::new(); + let statistics = db.get_statistics(); + assert!(statistics.contains_key("block.cache.hit")); + rocksdb_log.read_to_string(&mut settings).unwrap(); // Check column count assert!(settings.contains("Options for column family [default]"), "no default col"); diff --git a/kvdb-rocksdb/src/stats.rs b/kvdb-rocksdb/src/stats.rs index 80fa85ce2..c028b1948 100644 --- a/kvdb-rocksdb/src/stats.rs +++ b/kvdb-rocksdb/src/stats.rs @@ -7,15 +7,68 @@ // except according to those terms. 
use parking_lot::RwLock; +use std::collections::HashMap; +use std::str::FromStr; use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::time::Instant; +#[derive(Default, Clone, Copy)] pub struct RawDbStats { pub reads: u64, pub writes: u64, pub bytes_written: u64, pub bytes_read: u64, pub transactions: u64, + pub cache_hit_count: u64, +} + +#[derive(Default, Debug, Clone, Copy)] +pub struct RocksDbStatsTimeValue { + /// 50% percentile + pub p50: f64, + /// 95% percentile + pub p95: f64, + /// 99% percentile + pub p99: f64, + /// 100% percentile + pub p100: f64, + pub sum: u64, +} + +#[derive(Default, Debug, Clone, Copy)] +pub struct RocksDbStatsValue { + pub count: u64, + pub times: Option, +} + +pub fn parse_rocksdb_stats(stats: &str) -> HashMap { + stats.lines().map(|line| parse_rocksdb_stats_row(line.splitn(2, ' '))).collect() +} + +fn parse_rocksdb_stats_row<'a>(mut iter: impl Iterator) -> (String, RocksDbStatsValue) { + const PROOF: &str = "rocksdb statistics format is valid and hasn't changed"; + const SEPARATOR: &str = " : "; + let key = iter.next().expect(PROOF).trim_start_matches("rocksdb.").to_owned(); + let values = iter.next().expect(PROOF); + let value = if values.starts_with("COUNT") { + // rocksdb.row.cache.hit COUNT : 0 + RocksDbStatsValue { + count: u64::from_str(values.rsplit(SEPARATOR).next().expect(PROOF)).expect(PROOF), + times: None, + } + } else { + // rocksdb.db.get.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0 + let values: Vec<&str> = values.split_whitespace().filter(|s| *s != ":").collect(); + let times = RocksDbStatsTimeValue { + p50: f64::from_str(values.get(1).expect(PROOF)).expect(PROOF), + p95: f64::from_str(values.get(3).expect(PROOF)).expect(PROOF), + p99: f64::from_str(values.get(5).expect(PROOF)).expect(PROOF), + p100: f64::from_str(values.get(7).expect(PROOF)).expect(PROOF), + sum: u64::from_str(values.get(11).expect(PROOF)).expect(PROOF), + }; + RocksDbStatsValue { 
count: u64::from_str(values.get(9).expect(PROOF)).expect(PROOF), times: Some(times) } + }; + (key, value) } impl RawDbStats { @@ -26,6 +79,7 @@ impl RawDbStats { bytes_written: self.bytes_written + other.bytes_written, bytes_read: self.bytes_read + other.bytes_written, transactions: self.transactions + other.transactions, + cache_hit_count: self.cache_hit_count + other.cache_hit_count, } } } @@ -38,11 +92,7 @@ struct OverallDbStats { impl OverallDbStats { fn new() -> Self { - OverallDbStats { - stats: RawDbStats { reads: 0, writes: 0, bytes_written: 0, bytes_read: 0, transactions: 0 }, - last_taken: Instant::now(), - started: Instant::now(), - } + OverallDbStats { stats: RawDbStats::default(), last_taken: Instant::now(), started: Instant::now() } } } @@ -52,6 +102,7 @@ pub struct RunningDbStats { bytes_written: AtomicU64, bytes_read: AtomicU64, transactions: AtomicU64, + cache_hit_count: AtomicU64, overall: RwLock, } @@ -68,6 +119,7 @@ impl RunningDbStats { writes: 0.into(), bytes_written: 0.into(), transactions: 0.into(), + cache_hit_count: 0.into(), overall: OverallDbStats::new().into(), } } @@ -92,6 +144,10 @@ impl RunningDbStats { self.transactions.fetch_add(val, AtomicOrdering::Relaxed); } + pub fn tally_cache_hit_count(&self, val: u64) { + self.cache_hit_count.fetch_add(val, AtomicOrdering::Relaxed); + } + fn take_current(&self) -> RawDbStats { RawDbStats { reads: self.reads.swap(0, AtomicOrdering::Relaxed), @@ -99,6 +155,7 @@ impl RunningDbStats { bytes_written: self.bytes_written.swap(0, AtomicOrdering::Relaxed), bytes_read: self.bytes_read.swap(0, AtomicOrdering::Relaxed), transactions: self.transactions.swap(0, AtomicOrdering::Relaxed), + cache_hit_count: self.cache_hit_count.swap(0, AtomicOrdering::Relaxed), } } @@ -109,6 +166,7 @@ impl RunningDbStats { bytes_written: self.bytes_written.load(AtomicOrdering::Relaxed), bytes_read: self.bytes_read.load(AtomicOrdering::Relaxed), transactions: self.transactions.load(AtomicOrdering::Relaxed), + 
cache_hit_count: self.cache_hit_count.load(AtomicOrdering::Relaxed), } } From 01ccef7b5a4f48af0f2cbdca1e7896ff5e0508f5 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 28 Feb 2020 17:01:35 +0100 Subject: [PATCH 096/359] kvdb-rocksdb: bump version (#348) --- kvdb-rocksdb/CHANGELOG.md | 2 ++ kvdb-rocksdb/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index da022f21a..840e60b2f 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2019-02-28 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Added `get_statistics` method and `enable_statistics` config parameter. [#347](https://github.com/paritytech/parity-common/pull/347) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 1a51844e9..bfd75a14d 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" From 7ca53fb5320b639dbcfd75ada667ffde488fe219 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 12 Mar 2020 23:39:59 +0100 Subject: [PATCH 097/359] CI: troubleshoot macOS build (#356) --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index bea20cf75..f52e084c2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,12 +15,12 @@ matrix: script: - cargo check --all --benches - os: osx - osx_image: xcode11.2 + osx_image: xcode11.3 addons: chrome: stable firefox: latest install: - - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.1/install.sh | sh + - curl -o- 
https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | sh - source ~/.nvm/nvm.sh - nvm install --lts - npm install -g chromedriver From 2f3167d10e8b55e1555ac73a467d2a1eb0d0fc02 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 13 Mar 2020 00:02:27 +0100 Subject: [PATCH 098/359] parity-util-mem: use malloc for usable_size on android (#355) --- parity-util-mem/src/allocators.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index e159a32d0..45df9cba2 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -96,7 +96,7 @@ mod usable_size { mimalloc_sys::mi_usable_size(ptr as *mut _) } - } else if #[cfg(target_os = "linux")] { + } else if #[cfg(any(target_os = "linux", target_os = "android"))] { /// Linux call system allocator (currently malloc). extern "C" { From c31dabe5c2b6fdb87963deece2331a24e9982c8f Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 13 Mar 2020 09:51:47 +0100 Subject: [PATCH 099/359] kvdb: remove parity-bytes dependency (#351) * kvdb: remove parity-bytes dependency * kvdb: update changelog * Update kvdb/CHANGELOG.md --- kvdb/CHANGELOG.md | 1 + kvdb/Cargo.toml | 1 - kvdb/src/lib.rs | 3 +-- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index c7d38231b..d1e24ebb3 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Remove dependency on parity-bytes. [#351](https://github.com/paritytech/parity-common/pull/351) ## [0.4.0] - 2019-01-06 - Bump parking_lot to 0.10. 
[#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index c955920a9..722ebcc20 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -9,5 +9,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" -bytes = { package = "parity-bytes", version = "0.1", path = "../parity-bytes" } parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false } diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 6382145ce..723714de5 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -8,7 +8,6 @@ //! Key-Value store abstraction. -use bytes::Bytes; use smallvec::SmallVec; use std::io; @@ -73,7 +72,7 @@ impl DBTransaction { } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. - pub fn put_vec(&mut self, col: u32, key: &[u8], value: Bytes) { + pub fn put_vec(&mut self, col: u32, key: &[u8], value: Vec) { self.ops.push(DBOp::Insert { col, key: DBKey::from_slice(key), value }); } From be0704968b15b01a05abf62a5492018c7b5d7fae Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 13 Mar 2020 10:44:14 +0100 Subject: [PATCH 100/359] parity-util-mem: update mimalloc feature (#352) * parity-util-mem: update mimalloc feature * parity-util-mem: enable 'secure' feature of mimalloc --- parity-util-mem/Cargo.toml | 11 +++-------- parity-util-mem/src/allocators.rs | 2 +- parity-util-mem/src/lib.rs | 10 +++++----- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index e0539b23c..7b097f7e8 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -13,13 +13,8 @@ dlmalloc = { version = "0.1.3", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.4", optional = true } hashbrown = { version = "0.6", optional = true } -# from https://github.com/microsoft/mimalloc: -# mimalloc can be built in secure mode, -# adding guard 
pages, randomized allocation, encrypted free lists, etc. -# to protect against various heap vulnerabilities. -# The performance penalty is only around 3% on average over our benchmarks. -mimallocator = { version = "0.1.3", features = ["secure"], optional = true } -mimalloc-sys = { version = "0.1.6", optional = true } +mimalloc = { version = "0.1.18", optional = true } +libmimalloc-sys = { version = "0.1.14", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.1.3" @@ -45,7 +40,7 @@ weealloc-global = ["wee_alloc", "estimate-heapsize"] # use jemalloc as global allocator jemalloc-global = ["jemallocator"] # use mimalloc as global allocator -mimalloc-global = ["mimallocator", "mimalloc-sys"] +mimalloc-global = ["mimalloc", "libmimalloc-sys"] # implement additional types ethereum-impls = ["ethereum-types", "primitive-types"] # Full estimate: no call to allocator diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index 45df9cba2..8d8bb18db 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -93,7 +93,7 @@ mod usable_size { pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { // mimalloc doesn't actually mutate the value ptr points to, // but requires a mut pointer in the API - mimalloc_sys::mi_usable_size(ptr as *mut _) + libmimalloc_sys::mi_usable_size(ptr as *mut _) } } else if #[cfg(any(target_os = "linux", target_os = "android"))] { diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 87b47716a..528f0f668 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -21,24 +21,24 @@ cfg_if::cfg_if! 
{ not(target_os = "windows"), not(target_arch = "wasm32") ))] { - #[global_allocator] /// Global allocator + #[global_allocator] pub static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; } else if #[cfg(feature = "dlmalloc-global")] { - #[global_allocator] /// Global allocator + #[global_allocator] pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; } else if #[cfg(feature = "weealloc-global")] { - #[global_allocator] /// Global allocator + #[global_allocator] pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; } else if #[cfg(all( feature = "mimalloc-global", not(target_arch = "wasm32") ))] { - #[global_allocator] /// Global allocator - pub static ALLOC: mimallocator::Mimalloc = mimallocator::Mimalloc; + #[global_allocator] + pub static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc; } else { // default allocator used } From 2cb0588dfa5ed81d1212ec1703df99132fb80deb Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 13 Mar 2020 15:06:23 +0100 Subject: [PATCH 101/359] travis: test parity-util-mem on android (#358) --- .travis.yml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index f52e084c2..19c6b18ef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,12 +8,16 @@ matrix: rust: stable before_script: - rustup component add rustfmt - - os: linux - rust: beta - os: linux rust: nightly script: - - cargo check --all --benches + - cargo check --workspace --benches + - os: linux + rust: stable + install: + - cargo install cross + script: + - cross test --target=aarch64-linux-android -p parity-util-mem - os: osx osx_image: xcode11.3 addons: @@ -32,12 +36,10 @@ matrix: install: - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh script: - - if [ "$TRAVIS_RUST_VERSION" == "stable" ] && [ "$TRAVIS_OS_NAME" == "linux" ]; then + - if [ "$TRAVIS_OS_NAME" == "linux" ]; then cargo fmt -- --check; fi - - cargo check --all --tests - - cargo check --all 
--benches - - cargo build --all + - cargo check --workspace --tests --benches - cargo test --all --exclude uint --exclude fixed-hash --exclude parity-crypto - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cd contract-address/ && cargo test --features=external_doc && cd ..; From bf5879459ade112d128312408d1f190f3839a93b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 13 Mar 2020 23:03:26 +0100 Subject: [PATCH 102/359] parity-util-mem: prepare release for 0.5.2 (#359) --- parity-util-mem/CHANGELOG.md | 4 ++++ parity-util-mem/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index dbab86b6e..211d42145 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,7 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.2] - 2019-03-13 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated mimalloc dependency. [#352](https://github.com/paritytech/parity-common/pull/352) +- Use malloc for `usable_size` on Android. [#355](https://github.com/paritytech/parity-common/pull/355) ## [0.5.1] - 2019-02-05 - Add different mode for malloc_size_of_is_0 macro dealing with generics. 
[#334](https://github.com/paritytech/parity-common/pull/334) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 7b097f7e8..96b1f2508 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.5.1" +version = "0.5.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" From 32f8d96f051195197c3e6a266531690fd4e6ef20 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 16 Mar 2020 19:56:57 +0100 Subject: [PATCH 103/359] Prep for release (#361) * Prep for release * Removing a feature is a breaking change * Update ethereum-types/CHANGELOG.md Co-Authored-By: Andronik Ordian * Update keccak-hash/CHANGELOG.md Co-Authored-By: Andronik Ordian * Update parity-util-mem/CHANGELOG.md Co-Authored-By: Andronik Ordian * Update rlp/CHANGELOG.md Co-Authored-By: Andronik Ordian * Breaking where breaking is due Co-authored-by: Andronik Ordian --- contract-address/CHANGELOG.md | 3 +++ contract-address/Cargo.toml | 6 +++--- ethbloom/CHANGELOG.md | 2 ++ ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 3 +++ ethereum-types/Cargo.toml | 8 ++++---- fixed-hash/CHANGELOG.md | 2 ++ fixed-hash/Cargo.toml | 2 +- keccak-hash/CHANGELOG.md | 3 +++ keccak-hash/Cargo.toml | 4 ++-- kvdb-memorydb/CHANGELOG.md | 3 +++ kvdb-memorydb/Cargo.toml | 6 +++--- kvdb-rocksdb/CHANGELOG.md | 5 ++++- kvdb-rocksdb/Cargo.toml | 6 +++--- kvdb-shared-tests/Cargo.toml | 2 +- kvdb-web/CHANGELOG.md | 3 +++ kvdb-web/Cargo.toml | 8 ++++---- kvdb/CHANGELOG.md | 3 +++ kvdb/Cargo.toml | 4 ++-- parity-bytes/CHANGELOG.md | 2 ++ parity-bytes/Cargo.toml | 2 +- parity-crypto/CHANGELOG.md | 3 +++ parity-crypto/Cargo.toml | 24 ++++++++++++------------ parity-path/CHANGELOG.md | 2 ++ parity-path/Cargo.toml | 2 +- parity-util-mem/CHANGELOG.md | 5 ++++- parity-util-mem/Cargo.toml | 6 +++--- plain_hasher/CHANGELOG.md | 2 ++ 
plain_hasher/Cargo.toml | 2 +- primitive-types/CHANGELOG.md | 2 ++ primitive-types/Cargo.toml | 4 ++-- rlp/CHANGELOG.md | 4 ++++ rlp/Cargo.toml | 4 ++-- trace-time/CHANGELOG.md | 2 ++ trace-time/Cargo.toml | 2 +- transaction-pool/CHANGELOG.md | 2 ++ transaction-pool/Cargo.toml | 4 ++-- triehash/CHANGELOG.md | 3 ++- triehash/Cargo.toml | 4 ++-- 39 files changed, 103 insertions(+), 55 deletions(-) diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md index 546bc458e..a9b2a3a0b 100644 --- a/contract-address/CHANGELOG.md +++ b/contract-address/CHANGELOG.md @@ -5,4 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.9.0] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 21486e7a2..19ee6b363 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "contract-address" -version = "0.3.0" +version = "0.4.0" authors = ["Parity Technologies "] license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,9 +11,9 @@ edition = "2018" readme = "README.md" [dependencies] -ethereum-types = { version = "0.8.0", path = "../ethereum-types" } +ethereum-types = { version = "0.9.0", path = "../ethereum-types" } rlp = { version = "0.4", path = "../rlp" } -keccak-hash = { version = "0.4", path = "../keccak-hash", default-features = false } +keccak-hash = { version = "0.5", path = "../keccak-hash", default-features = false } [features] default = [] diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 6c13f6f0a..de418cd9d 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.9.0] - 2020-03-16 - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index cbf9112a0..da4b20047 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.8.1" +version = "0.9.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT/Apache2.0" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } -fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 6b214e75b..9455e7bba 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,7 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.9.0] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) ### Added - Uint error type is re-exported. 
[#244](https://github.com/paritytech/parity-common/pull/244) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index b706f2873..00b49ac7a 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,10 +8,10 @@ description = "Ethereum types" edition = "2018" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.8", default-features = false } -fixed-hash = { path = "../fixed-hash", version = "0.5", default-features = false, features = ["byteorder", "rustc-hex"] } +ethbloom = { path = "../ethbloom", version = "0.9", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.6", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.7", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index f486d7847..da5ec524d 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2020-03-16 - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) - License changed from MIT to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 41bf7d237..324804592 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.5.2" +version = "0.6.0" authors = ["Parity Technologies "] license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 429e3bc13..13f73c4a9 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -5,7 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.4.2] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) ## [0.4.1] - 2019-10-24 ### Dependencies diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 49c9506dd..ca317f522 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.4.1" +version = "0.5.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ edition = "2018" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.6", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.7", default-features = false } [dev-dependencies] tempdir = "0.3.7" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 3da9b9203..a8c74772d 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,7 +5,10 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) ## [0.4.0] - 2019-02-05 - Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 24d6a9991..90a5dc980 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,9 +8,9 @@ license = "MIT/Apache-2.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false, features = ["std"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false, features = ["std"] } parking_lot = "0.10.0" -kvdb = { version = "0.4", path = "../kvdb" } +kvdb = { version = "0.5", path = "../kvdb" } [dev-dependencies] kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 840e60b2f..e7561c059 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,7 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.6.0] - 2019-02-28 +## [0.7.0] - 2020-03-16 +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +## [0.6.0] - 2020-02-28 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Added `get_statistics` method and `enable_statistics` config parameter. 
[#347](https://github.com/paritytech/parity-common/pull/347) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index bfd75a14d..c4d573bf8 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -15,14 +15,14 @@ harness = false smallvec = "1.0.0" fs-swap = "0.2.4" interleaved-ordered = "0.1.1" -kvdb = { path = "../kvdb", version = "0.4" } +kvdb = { path = "../kvdb", version = "0.5" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.10.0" regex = "1.3.1" rocksdb = { version = "0.13", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 367eef8b6..ff461caac 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -7,4 +7,4 @@ description = "Shared tests for kvdb functionality, to be executed against actua license = "MIT/Apache2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.4" } +kvdb = { path = "../kvdb", version = "0.5" } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 0d4a23ad9..f8e952bf3 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,7 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. 
[#361](https://github.com/paritytech/parity-common/pull/361) ## [0.4.0] - 2019-02-05 - Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 284418ca5..8d5fc7be4 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.54" js-sys = "0.3.31" -kvdb = { version = "0.4", path = "../kvdb" } -kvdb-memorydb = { version = "0.4", path = "../kvdb-memorydb" } +kvdb = { version = "0.5", path = "../kvdb" } +kvdb-memorydb = { version = "0.5", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false } [dependencies.web-sys] version = "0.3.31" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index d1e24ebb3..18f484117 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,8 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Remove dependency on parity-bytes. [#351](https://github.com/paritytech/parity-common/pull/351) +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) ## [0.4.0] - 2019-01-06 - Bump parking_lot to 0.10. 
[#332](https://github.com/paritytech/parity-common/pull/332) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 722ebcc20..669b19762 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -9,4 +9,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.5", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false } diff --git a/parity-bytes/CHANGELOG.md b/parity-bytes/CHANGELOG.md index ef8a2633d..49b2d0857 100644 --- a/parity-bytes/CHANGELOG.md +++ b/parity-bytes/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.1.2] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.1.1] - 2019-10-24 diff --git a/parity-bytes/Cargo.toml b/parity-bytes/Cargo.toml index 582d6560e..384489203 100644 --- a/parity-bytes/Cargo.toml +++ b/parity-bytes/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-bytes" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "byte utilities for Parity" diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index c955084f9..14290d54d 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,7 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +- Updated dependencies. 
[#361](https://github.com/paritytech/parity-common/pull/361) ## [0.5.0] - 2020-02-08 - Remove `inv()` from `SecretKey` (breaking). [#258](https://github.com/paritytech/parity-common/pull/258) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 0af20678b..f51bbd88f 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." @@ -14,23 +14,23 @@ harness = false required-features = ["publickey"] [dependencies] -tiny-keccak = { version = "2.0", features = ["keccak"] } -scrypt = { version = "0.2.0", default-features = false } -secp256k1 = { version = "0.17.2", optional = true, features = ["recovery", "rand-std"] } -ethereum-types = { version = "0.8.0", optional = true } -lazy_static = { version = "1.0", optional = true } -ripemd160 = "0.8.0" -sha2 = "0.8.0" -digest = "0.8" -hmac = "0.7" aes = "0.3.2" aes-ctr = "0.3.0" block-modes = "0.3.3" +digest = "0.8" +ethereum-types = { version = "0.9.0", optional = true, path = "../ethereum-types" } +hmac = "0.7" +lazy_static = { version = "1.0", optional = true } pbkdf2 = "0.3.0" -subtle = "2.2.1" -zeroize = { version = "1.0.0", default-features = false } rand = "0.7.2" +ripemd160 = "0.8.0" rustc-hex = { version = "2.1.0", default-features = false, optional = true } +scrypt = { version = "0.2.0", default-features = false } +secp256k1 = { version = "0.17.2", optional = true, features = ["recovery", "rand-std"] } +sha2 = "0.8.0" +subtle = "2.2.1" +tiny-keccak = { version = "2.0", features = ["keccak"] } +zeroize = { version = "1.0.0", default-features = false } [dev-dependencies] criterion = "0.3.0" diff --git a/parity-path/CHANGELOG.md b/parity-path/CHANGELOG.md index fc0263835..ed9aa8162 100644 --- a/parity-path/CHANGELOG.md +++ b/parity-path/CHANGELOG.md @@ -5,4 +5,6 
@@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.1.3] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml index 0c783cf6c..8f272ad51 100644 --- a/parity-path/Cargo.toml +++ b/parity-path/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-path" -version = "0.1.2" +version = "0.1.3" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Path utilities" diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 211d42145..fdb3f5f7b 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,7 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.5.2] - 2019-03-13 +## [0.6.0] - 2020-03-13 +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + +## [0.5.2] - 2020-03-13 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Updated mimalloc dependency. [#352](https://github.com/paritytech/parity-common/pull/352) - Use malloc for `usable_size` on Android. 
[#355](https://github.com/paritytech/parity-common/pull/355) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 96b1f2508..9f5e77039 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.5.2" +version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -19,9 +19,9 @@ parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.1.3" smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.8.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.9.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.10.0", optional = true } -primitive-types = { version = "0.6", path = "../primitive-types", default-features = false, optional = true } +primitive-types = { version = "0.7", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } diff --git a/plain_hasher/CHANGELOG.md b/plain_hasher/CHANGELOG.md index afc302090..c3f142cfd 100644 --- a/plain_hasher/CHANGELOG.md +++ b/plain_hasher/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.2.3] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.2.2] - 2019-10-24 diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml index 57cf4d0c1..7e0812571 100644 --- a/plain_hasher/Cargo.toml +++ b/plain_hasher/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "plain_hasher" description = "Hasher for 32-byte keys." 
-version = "0.2.2" +version = "0.2.3" authors = ["Parity Technologies "] license = "MIT/Apache2.0" keywords = ["hash", "hasher"] diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 0689eab5a..4c6f65713 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.7.0] - 2020-03-16 - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) ## [0.6.2] - 2019-01-03 diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index e6b53391c..3169b5c2e 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.6.2" +version = "0.7.0" authors = ["Parity Technologies "] license = "MIT/Apache2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,7 +8,7 @@ description = "Primitive types shared by Ethereum and Substrate" edition = "2018" [dependencies] -fixed-hash = { version = "0.5", path = "../fixed-hash", default-features = false } +fixed-hash = { version = "0.6", path = "../fixed-hash", default-features = false } uint = { version = "0.8.1", path = "../uint", default-features = false } impl-serde = { version = "0.3.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index c817dc6b9..cee20902d 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.4.5] - 2020-03-16 +### Dependencies +- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) + ## [0.4.4] - 2019-11-20 ### Added - Method `Rlp::at_with_offset`. 
[#269](https://github.com/paritytech/parity-common/pull/269) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index ea0da9b10..947418a6b 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.4.4" +version = "0.4.5" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT/Apache-2.0" @@ -13,7 +13,7 @@ rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] criterion = "0.3.0" hex-literal = "0.2.1" -primitive-types = { path = "../primitive-types", version = "0.6", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.7", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/trace-time/CHANGELOG.md b/trace-time/CHANGELOG.md index 300ce6461..0f666b438 100644 --- a/trace-time/CHANGELOG.md +++ b/trace-time/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.1.3] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.1.2] - 2019-10-24 diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml index fca5a7619..225cdec27 100644 --- a/trace-time/Cargo.toml +++ b/trace-time/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "trace-time" description = "Easily trace time to execute a scope." -version = "0.1.2" +version = "0.1.3" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" license = "MIT/Apache2.0" diff --git a/transaction-pool/CHANGELOG.md b/transaction-pool/CHANGELOG.md index 431effd94..334625f2d 100644 --- a/transaction-pool/CHANGELOG.md +++ b/transaction-pool/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [2.0.3] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [2.0.2] - 2019-10-24 diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 0da12d924..2bf1dbb59 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Generic transaction pool." name = "transaction-pool" -version = "2.0.2" +version = "2.0.3" license = "MIT/Apache2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -13,4 +13,4 @@ smallvec = "0.6.10" trace-time = { path = "../trace-time", version = "0.1" } [dev-dependencies] -ethereum-types = { version = "0.8.0", path = "../ethereum-types" } +ethereum-types = { version = "0.9.0", path = "../ethereum-types" } diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md index 88acd4dd1..b5f0357b1 100644 --- a/triehash/CHANGELOG.md +++ b/triehash/CHANGELOG.md @@ -5,8 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) +## [0.8.3] - 2020-03-16 +- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) ## [0.8.2] - 2019-12-15 - Added no-std support. 
[#280](https://github.com/paritytech/parity-common/pull/280) ## [0.8.1] - 2019-10-24 diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 407489469..19496503a 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "triehash" -version = "0.8.2" +version = "0.8.3" authors = ["Parity Technologies "] description = "In-memory patricia trie operations" repository = "https://github.com/paritytech/parity-common" @@ -14,7 +14,7 @@ rlp = { version = "0.4", path = "../rlp", default-features = false } [dev-dependencies] criterion = "0.3.0" keccak-hasher = "0.15.2" -ethereum-types = { version = "0.8.0", path = "../ethereum-types" } +ethereum-types = { version = "0.9.0", path = "../ethereum-types" } tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" hex-literal = "0.2.1" From 939151e23b132110628739e8458e6cece1f1c8d0 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 17 Mar 2020 06:25:26 -0700 Subject: [PATCH 104/359] Memtest example for Rocksdb (#349) * memtest * address review * Update kvdb-rocksdb/examples/memtest.rs * return time 0.1 --- kvdb-rocksdb/Cargo.toml | 4 + kvdb-rocksdb/examples/memtest.rs | 149 +++++++++++++++++++++++++++++++ 2 files changed, 153 insertions(+) create mode 100644 kvdb-rocksdb/examples/memtest.rs diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index c4d573bf8..3204bbd79 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -31,3 +31,7 @@ ethereum-types = { path = "../ethereum-types" } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } rand = "0.7.2" tempdir = "0.3.7" +keccak-hash = { path = "../keccak-hash" } +sysinfo = "0.11.7" +ctrlc = "3.1.4" +time = "0.1" diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs new file mode 100644 index 000000000..59fa1a137 --- /dev/null +++ b/kvdb-rocksdb/examples/memtest.rs @@ -0,0 +1,149 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Ethereum. + +// Parity Ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Ethereum. If not, see . + +// This program starts writing random data to the database with 100 (COLUMN_COUNT) +// columns and never stops until interrupted. + +use ethereum_types::H256; +use keccak_hash::keccak; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use std::sync::{atomic::AtomicBool, atomic::Ordering as AtomicOrdering, Arc}; +use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; + +const COLUMN_COUNT: u32 = 100; + +#[derive(Clone)] +struct KeyValueSeed { + seed: H256, + key: H256, + val: H256, +} + +fn next(seed: H256) -> H256 { + let mut buf = [0u8; 33]; + buf[0..32].copy_from_slice(&seed[..]); + buf[32] = 1; + + keccak(&buf[..]) +} + +impl KeyValueSeed { + fn with_seed(seed: H256) -> Self { + KeyValueSeed { seed, key: next(seed), val: next(next(seed)) } + } + + fn new() -> Self { + Self::with_seed(H256::random()) + } +} + +impl Iterator for KeyValueSeed { + type Item = (H256, H256); + + fn next(&mut self) -> Option { + let result = (self.key, self.val); + self.key = next(self.val); + self.val = next(self.key); + + Some(result) + } +} + +fn proc_memory_usage() -> u64 { + let mut sys = System::new(); + let self_pid = get_current_pid().ok(); + let memory = if let Some(self_pid) = self_pid { + if sys.refresh_process(self_pid) { + let proc = sys.get_process(self_pid).expect("Above refresh_process succeeds, 
this should be Some(), qed"); + proc.memory() + } else { + 0 + } + } else { + 0 + }; + + memory +} + +fn main() { + let mb_per_col = std::env::args() + .nth(1) + .map(|arg| arg.parse().expect("Megabytes per col - should be integer or missing")) + .unwrap_or(1); + + let exit = Arc::new(AtomicBool::new(false)); + let ctrlc_exit = exit.clone(); + + ctrlc::set_handler(move || { + println!("\nRemoving temp database...\n"); + ctrlc_exit.store(true, AtomicOrdering::Relaxed); + }) + .expect("Error setting Ctrl-C handler"); + + let mut config = DatabaseConfig::with_columns(COLUMN_COUNT); + + for c in 0..=COLUMN_COUNT { + config.memory_budget.insert(c, mb_per_col); + } + let dir = tempdir::TempDir::new("rocksdb-example").unwrap(); + + println!("Database is put in: {} (maybe check if it was deleted)", dir.path().to_string_lossy()); + let db = Database::open(&config, &dir.path().to_string_lossy()).unwrap(); + + let mut step = 0; + let mut keyvalues = KeyValueSeed::new(); + while !exit.load(AtomicOrdering::Relaxed) { + let col = step % 100; + + let key_values: Vec<(H256, H256)> = keyvalues.clone().take(128).collect(); + let mut transaction = db.transaction(); + for (k, v) in key_values.iter() { + transaction.put(col, k.as_ref(), v.as_ref()); + } + db.write(transaction).expect("writing failed"); + + let mut seed = H256::zero(); + for (k, _) in key_values.iter() { + let mut buf = [0u8; 64]; + buf[0..32].copy_from_slice(seed.as_ref()); + let val = db.get(col, k.as_ref()).expect("Db fail").expect("Was put above"); + buf[32..64].copy_from_slice(val.as_ref()); + + seed = keccak(&buf[..]); + } + + let mut transaction = db.transaction(); + // delete all but one to avoid too much bloating + for (k, _) in key_values.iter().take(127) { + transaction.delete(col, k.as_ref()); + } + db.write(transaction).expect("delete failed"); + + keyvalues = KeyValueSeed::with_seed(seed); + + if step % 10000 == 9999 { + let timestamp = time::strftime("%Y-%m-%d %H:%M:%S", &time::now()).expect("Error 
formatting log timestamp"); + + println!("{}", timestamp); + println!("\tData written: {} keys - {} Mb", step + 1, ((step + 1) * 64 * 128) / 1024 / 1024); + println!("\tProcess memory used as seen by the OS: {} Mb", proc_memory_usage() / 1024); + println!("\tMemory used as reported by rocksdb: {} Mb\n", parity_util_mem::malloc_size(&db) / 1024 / 1024); + } + + step += 1; + } +} From 791f5524da0de25a061fa1c71164b036af43e172 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 25 Mar 2020 09:45:49 +0100 Subject: [PATCH 105/359] Use correct license ID (#362) * Use correct license ID * SPDX format for license ID --- contract-address/Cargo.toml | 2 +- ethbloom/Cargo.toml | 2 +- ethereum-types/Cargo.toml | 2 +- fixed-hash/Cargo.toml | 2 +- keccak-hash/Cargo.toml | 2 +- kvdb-memorydb/Cargo.toml | 2 +- kvdb-rocksdb/Cargo.toml | 2 +- kvdb-shared-tests/Cargo.toml | 2 +- kvdb-web/Cargo.toml | 2 +- kvdb/Cargo.toml | 2 +- parity-bytes/Cargo.toml | 2 +- parity-crypto/Cargo.toml | 2 +- parity-path/Cargo.toml | 2 +- parity-util-mem/Cargo.toml | 2 +- parity-util-mem/derive/Cargo.toml | 2 +- plain_hasher/Cargo.toml | 2 +- primitive-types/Cargo.toml | 2 +- primitive-types/impls/codec/Cargo.toml | 2 +- primitive-types/impls/rlp/Cargo.toml | 2 +- primitive-types/impls/serde/Cargo.toml | 2 +- rlp-derive/Cargo.toml | 2 +- rlp/Cargo.toml | 2 +- trace-time/Cargo.toml | 2 +- transaction-pool/Cargo.toml | 2 +- triehash/Cargo.toml | 2 +- uint/Cargo.toml | 2 +- 26 files changed, 26 insertions(+), 26 deletions(-) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 19ee6b363..b8a0b7993 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -2,7 +2,7 @@ name = "contract-address" version = "0.4.0" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" description = "A utility crate to create an ethereum 
contract address" diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index da4b20047..038378a64 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -3,7 +3,7 @@ name = "ethbloom" version = "0.9.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" documentation = "https://docs.rs/ethbloom" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 00b49ac7a..b04c87bbd 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -2,7 +2,7 @@ name = "ethereum-types" version = "0.9.0" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Ethereum types" edition = "2018" diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 324804592..93903ef7d 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -2,7 +2,7 @@ name = "fixed-hash" version = "0.6.0" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" description = "Macros to define custom fixed-size hash types" diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index ca317f522..ea36f7288 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -5,7 +5,7 @@ description = "`keccak-hash` is a set of utility functions to facilitate working authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" readme = "README.md" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 90a5dc980..0ea7119a8 100644 --- a/kvdb-memorydb/Cargo.toml +++ 
b/kvdb-memorydb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 3204bbd79..322d0865f 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" [[bench]] diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index ff461caac..c5987c5c7 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -4,7 +4,7 @@ version = "0.2.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" [dependencies] kvdb = { path = "../kvdb", version = "0.5" } diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 8d5fc7be4..331ef1fa6 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" documentation = "https://docs.rs/kvdb-web/" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 669b19762..f6851ef33 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.5.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" -license = "MIT/Apache-2.0" 
+license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/parity-bytes/Cargo.toml b/parity-bytes/Cargo.toml index 384489203..651369f1c 100644 --- a/parity-bytes/Cargo.toml +++ b/parity-bytes/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "byte utilities for Parity" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index f51bbd88f..763fc4b74 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" autobenches = false edition = "2018" diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml index 8f272ad51..d23442c8f 100644 --- a/parity-path/Cargo.toml +++ b/parity-path/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.3" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Path utilities" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 9f5e77039..af3916902 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/parity-util-mem/derive/Cargo.toml b/parity-util-mem/derive/Cargo.toml index b7648ebd2..d41ba12f5 100644 --- a/parity-util-mem/derive/Cargo.toml +++ b/parity-util-mem/derive/Cargo.toml @@ -2,7 +2,7 @@ name = 
"parity-util-mem-derive" version = "0.1.0" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" description = "Crate for memory reporting" repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml index 7e0812571..bb5a1668d 100644 --- a/plain_hasher/Cargo.toml +++ b/plain_hasher/Cargo.toml @@ -3,7 +3,7 @@ name = "plain_hasher" description = "Hasher for 32-byte keys." version = "0.2.3" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" keywords = ["hash", "hasher"] homepage = "https://github.com/paritytech/parity-common" categories = ["no-std"] diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 3169b5c2e..06e6e154d 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -2,7 +2,7 @@ name = "primitive-types" version = "0.7.0" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" edition = "2018" diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 4b39c10af..df837fd01 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -2,7 +2,7 @@ name = "impl-codec" version = "0.4.2" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Parity Codec serialization support for uint and fixed hash." 
edition = "2018" diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index 85db3c328..fbc12c7fb 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -2,7 +2,7 @@ name = "impl-rlp" version = "0.2.1" authors = ["Parity Technologies "] -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "RLP serialization support for uint and fixed hash." edition = "2018" diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 641a59e6a..ab923bb63 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -3,7 +3,7 @@ name = "impl-serde" version = "0.3.0" authors = ["Parity Technologies "] edition = "2018" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." 
diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index de3824a2f..5059d6d02 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -2,7 +2,7 @@ name = "rlp-derive" version = "0.1.0" authors = ["Parity Technologies "] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "Derive macro for #[derive(RlpEncodable, RlpDecodable)]" homepage = "http://parity.io" edition = "2018" diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 947418a6b..cb8694c9e 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -3,7 +3,7 @@ name = "rlp" version = "0.4.5" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml index 225cdec27..81877e186 100644 --- a/trace-time/Cargo.toml +++ b/trace-time/Cargo.toml @@ -4,7 +4,7 @@ description = "Easily trace time to execute a scope." version = "0.1.3" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 2bf1dbb59..a13bc5767 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -2,7 +2,7 @@ description = "Generic transaction pool." 
name = "transaction-pool" version = "2.0.3" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" edition = "2018" diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 19496503a..358161a80 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -4,7 +4,7 @@ version = "0.8.3" authors = ["Parity Technologies "] description = "In-memory patricia trie operations" repository = "https://github.com/paritytech/parity-common" -license = "MIT/Apache2.0" +license = "MIT OR Apache-2.0" edition = "2018" [dependencies] diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 3a61567a6..257b3fa7f 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -2,7 +2,7 @@ description = "Large fixed-size integer arithmetic" homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" name = "uint" version = "0.8.2" authors = ["Parity Technologies "] From 8a29232801708e2679ee1f444136d76e6e340837 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 25 Mar 2020 18:46:49 +0100 Subject: [PATCH 106/359] Ban duplicates of parity-uil-mem from being linked into the same program (#363) * Prevent multiple versions of parity-uil-mem from being linked into the same program * fmt * parity-util-mem: add a warning about defining global allocs * parity-util-mem: document empty build script * parity-util-mem: extend the warning based on David comments --- parity-util-mem/CHANGELOG.md | 2 ++ parity-util-mem/Cargo.toml | 7 +++++++ parity-util-mem/README.md | 14 ++++++++++++++ parity-util-mem/build.rs | 1 + 4 files changed, 24 insertions(+) create mode 100644 parity-util-mem/build.rs diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index fdb3f5f7b..ec8ef91f9 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Prevent multiple versions from being linked into the same program. [#363](https://github.com/paritytech/parity-common/pull/363) ## [0.6.0] - 2020-03-13 - Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index af3916902..700061c32 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -7,6 +7,13 @@ description = "Collection of memory related utilities" license = "MIT OR Apache-2.0" edition = "2018" +# Prevent multiple versions from being linked into the same program. +links = "parity-util-mem-ban-duplicates" +# `links` requires a build script to be present: +# https://doc.rust-lang.org/cargo/reference/build-scripts.html#the-links-manifest-key +# so we use an empty build script +build = "build.rs" + [dependencies] cfg-if = "0.1.10" dlmalloc = { version = "0.1.3", features = ["global"], optional = true } diff --git a/parity-util-mem/README.md b/parity-util-mem/README.md index 14d6e6c19..a6f15b90b 100644 --- a/parity-util-mem/README.md +++ b/parity-util-mem/README.md @@ -2,6 +2,20 @@ Collection of memory related utilities. +## WARNING + +When `parity-util-mem` is used as a dependency with any of the global allocator features enabled, +it must be the sole place where a global allocator is defined. +The only exception to this rule is when used in a `no_std` context or when the `estimate-heapsize` feature is used. + +Because of that, it must be present in the dependency tree with a single version. +Starting from version 0.7, having duplicate versions of `parity-util-mem` will lead +to a compile-time error. It still will be possible to have 0.6 and 0.7 versions in the same binary though. + +Unless heeded you risk UB; see discussion in [issue 364]. 
+ +[issue 364]: https://github.com/paritytech/parity-common/issues/364 + ## Features - estimate-heapsize : Do not use allocator, but `size_of` or `size_of_val`. diff --git a/parity-util-mem/build.rs b/parity-util-mem/build.rs new file mode 100644 index 000000000..f328e4d9d --- /dev/null +++ b/parity-util-mem/build.rs @@ -0,0 +1 @@ +fn main() {} From 7093b7e63304b236ab5d654b137c656d8bf833f9 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 26 Mar 2020 15:18:49 +0100 Subject: [PATCH 107/359] kvdb: no overlay (#313) * kvdb-rocksdb: no overlay experiment * kvdb-rocksdb: remove unused dep * kvdb-rocksdb: panic on write failure * kvdb: remove write_buffered and flush * kvdb: update changelog * kvdb-rocksdb: update changelog * kvdb-web: update changelog * kvdb-memory: update changelog * kvdb-rocksdb: fix the bench * kvdb-rocksdb: fix the bench #2 * kvdb: cargo fmt * Apply suggestions from code review Co-Authored-By: David * kvdb-rocksdb: s/acquired_val/value * kvdb-rocksdb: update the changelog Co-authored-by: David --- kvdb-memorydb/CHANGELOG.md | 2 + kvdb-memorydb/src/lib.rs | 5 +- kvdb-rocksdb/CHANGELOG.md | 2 + kvdb-rocksdb/Cargo.toml | 1 - kvdb-rocksdb/benches/bench_read_perf.rs | 2 - kvdb-rocksdb/src/lib.rs | 186 +++--------------------- kvdb-shared-tests/src/lib.rs | 13 +- kvdb-web/CHANGELOG.md | 2 + kvdb-web/src/lib.rs | 10 +- kvdb-web/tests/indexed_db.rs | 2 +- kvdb/CHANGELOG.md | 2 + kvdb/src/lib.rs | 30 +--- 12 files changed, 48 insertions(+), 209 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index a8c74772d..4db19fc7a 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) ## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 51bac41f0..6977fbf4e 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -52,7 +52,7 @@ impl KeyValueDB for InMemory { } } - fn write_buffered(&self, transaction: DBTransaction) { + fn write(&self, transaction: DBTransaction) -> io::Result<()> { let mut columns = self.columns.write(); let ops = transaction.ops; for op in ops { @@ -69,9 +69,6 @@ impl KeyValueDB for InMemory { } } } - } - - fn flush(&self) -> io::Result<()> { Ok(()) } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index e7561c059..43cf44408 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) ## [0.7.0] - 2020-03-16 - Updated dependencies. 
[#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 322d0865f..315b4d864 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -14,7 +14,6 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.4" -interleaved-ordered = "0.1.1" kvdb = { path = "../kvdb", version = "0.5" } log = "0.4.8" num_cpus = "1.10.1" diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs index 8c17d8981..3a14ac752 100644 --- a/kvdb-rocksdb/benches/bench_read_perf.rs +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -76,8 +76,6 @@ fn populate(db: &Database) -> io::Result> { batch.put(0, &key.as_bytes(), &n_random_bytes(140)); } db.write(batch)?; - // Clear the overlay - db.flush()?; Ok(needles) } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index ac8958919..8755f207f 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -12,15 +12,14 @@ mod stats; use std::{cmp, collections::HashMap, convert::identity, error, fs, io, mem, path::Path, result}; use parity_util_mem::MallocSizeOf; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::RwLock; use rocksdb::{ BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Error, Options, ReadOptions, WriteBatch, WriteOptions, DB, }; use crate::iter::KeyValuePair; use fs_swap::{swap, swap_nonatomic}; -use interleaved_ordered::interleave_ordered; -use kvdb::{DBKey, DBOp, DBTransaction, DBValue, KeyValueDB}; +use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; use log::{debug, warn}; #[cfg(target_os = "linux")] @@ -51,12 +50,6 @@ pub const DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB: MiB = 128; /// The default memory budget in MiB. 
pub const DB_DEFAULT_MEMORY_BUDGET_MB: MiB = 512; -#[derive(MallocSizeOf)] -enum KeyState { - Insert(DBValue), - Delete, -} - /// Compaction profile for the database settings /// Note, that changing these parameters may trigger /// the compaction process of RocksDB on startup. @@ -281,15 +274,8 @@ pub struct Database { read_opts: ReadOptions, #[ignore_malloc_size_of = "insignificant"] block_opts: BlockBasedOptions, - // Dirty values added with `write_buffered`. Cleaned on `flush`. - overlay: RwLock>>, #[ignore_malloc_size_of = "insignificant"] stats: stats::RunningDbStats, - // Values currently being flushed. Cleared when `flush` completes. - flushing: RwLock>>, - // Prevents concurrent flushes. - // Value indicates if a flush is in progress. - flushing_lock: Mutex, } #[inline] @@ -418,9 +404,6 @@ impl Database { Ok(Database { db: RwLock::new(Some(DBAndColumns { db, column_names })), config: config.clone(), - overlay: RwLock::new((0..config.columns).map(|_| HashMap::new()).collect()), - flushing: RwLock::new((0..config.columns).map(|_| HashMap::new()).collect()), - flushing_lock: Mutex::new(false), path: path.to_owned(), opts, read_opts, @@ -435,75 +418,6 @@ impl Database { DBTransaction::new() } - /// Commit transaction to database. - pub fn write_buffered(&self, tr: DBTransaction) { - let mut overlay = self.overlay.write(); - let ops = tr.ops; - for op in ops { - match op { - DBOp::Insert { col, key, value } => overlay[col as usize].insert(key, KeyState::Insert(value)), - DBOp::Delete { col, key } => overlay[col as usize].insert(key, KeyState::Delete), - }; - } - } - - /// Commit buffered changes to database. 
Must be called under `flush_lock` - fn write_flushing_with_lock(&self, _lock: &mut MutexGuard<'_, bool>) -> io::Result<()> { - match *self.db.read() { - Some(ref cfs) => { - let mut batch = WriteBatch::default(); - let mut ops: usize = 0; - let mut bytes: usize = 0; - mem::swap(&mut *self.overlay.write(), &mut *self.flushing.write()); - { - for (c, column) in self.flushing.read().iter().enumerate() { - ops += column.len(); - for (key, state) in column.iter() { - let cf = cfs.cf(c); - match *state { - KeyState::Delete => { - bytes += key.len(); - batch.delete_cf(cf, key).map_err(other_io_err)? - } - KeyState::Insert(ref value) => { - bytes += key.len() + value.len(); - batch.put_cf(cf, key, value).map_err(other_io_err)? - } - }; - } - } - } - - check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts))?; - self.stats.tally_transactions(1); - self.stats.tally_writes(ops as u64); - self.stats.tally_bytes_written(bytes as u64); - - for column in self.flushing.write().iter_mut() { - column.clear(); - column.shrink_to_fit(); - } - Ok(()) - } - None => Err(other_io_err("Database is closed")), - } - } - - /// Commit buffered changes to database. - pub fn flush(&self) -> io::Result<()> { - let mut lock = self.flushing_lock.lock(); - // If RocksDB batch allocation fails the thread gets terminated and the lock is released. - // The value inside the lock is used to detect that. - if *lock { - // This can only happen if another flushing thread is terminated unexpectedly. - return Err(other_io_err("Database write failure. Running low on memory perhaps?")); - } - *lock = true; - let result = self.write_flushing_with_lock(&mut lock); - *lock = false; - result - } - /// Commit transaction to database. 
pub fn write(&self, tr: DBTransaction) -> io::Result<()> { match *self.db.read() { @@ -517,9 +431,6 @@ impl Database { let mut stats_total_bytes = 0; for op in ops { - // remove any buffered operation for this key - self.overlay.write()[op.col() as usize].remove(op.key()); - let cf = cfs.cf(op.col() as usize); match op { @@ -546,84 +457,55 @@ impl Database { pub fn get(&self, col: u32, key: &[u8]) -> io::Result> { match *self.db.read() { Some(ref cfs) => { - self.stats.tally_reads(1); - let guard = self.overlay.read(); - let overlay = - guard.get(col as usize).ok_or_else(|| other_io_err("kvdb column index is out of bounds"))?; - match overlay.get(key) { - Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - let flushing = &self.flushing.read()[col as usize]; - match flushing.get(key) { - Some(&KeyState::Insert(ref value)) => Ok(Some(value.clone())), - Some(&KeyState::Delete) => Ok(None), - None => { - let acquired_val = cfs - .db - .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) - .map(|r| r.map(|v| v.to_vec())) - .map_err(other_io_err); - - match acquired_val { - Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), - Ok(None) => self.stats.tally_bytes_read(key.len() as u64), - _ => {} - }; - - acquired_val - } - } - } + if cfs.column_names.get(col as usize).is_none() { + return Err(other_io_err("column index is out of bounds")); } + self.stats.tally_reads(1); + let value = cfs + .db + .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) + .map(|r| r.map(|v| v.to_vec())) + .map_err(other_io_err); + + match value { + Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), + Ok(None) => self.stats.tally_bytes_read(key.len() as u64), + _ => {} + }; + + value } None => Ok(None), } } - /// Get value by partial key. Prefix size should match configured prefix size. Only searches flushed values. 
- // TODO: support prefix seek for unflushed data + /// Get value by partial key. Prefix size should match configured prefix size. pub fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { self.iter_from_prefix(col, prefix).next().map(|(_, v)| v) } - /// Get database iterator for flushed data. + /// Iterator over the data in the given database column index. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. pub fn iter<'a>(&'a self, col: u32) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { - let overlay_data = { - let overlay = &self.overlay.read()[col as usize]; - let mut overlay_data = overlay - .iter() - .filter_map(|(k, v)| match *v { - KeyState::Insert(ref value) => { - Some((k.clone().into_vec().into_boxed_slice(), value.clone().into_boxed_slice())) - } - KeyState::Delete => None, - }) - .collect::>(); - overlay_data.sort(); - overlay_data - }; - let guarded = iter::ReadGuardedIterator::new(read_lock, col, &self.read_opts); - Some(interleave_ordered(overlay_data, guarded)) + Some(guarded) } else { None }; optional.into_iter().flat_map(identity) } - /// Get database iterator from prefix for flushed data. + /// Iterator over data in the `col` database column index matching the given prefix. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. fn iter_from_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { let guarded = iter::ReadGuardedIterator::new_from_prefix(read_lock, col, prefix, &self.read_opts); - Some(interleave_ordered(Vec::new(), guarded)) + Some(guarded) } else { None }; @@ -636,8 +518,6 @@ impl Database { /// Close the database fn close(&self) { *self.db.write() = None; - self.overlay.write().clear(); - self.flushing.write().clear(); } /// Restore the database from a copy at given path. 
@@ -671,8 +551,6 @@ impl Database { // reopen the database and steal handles into self let db = Self::open(&self.config, &self.path)?; *self.db.write() = mem::replace(&mut *db.db.write(), None); - *self.overlay.write() = mem::replace(&mut *db.overlay.write(), Vec::new()); - *self.flushing.write() = mem::replace(&mut *db.flushing.write(), Vec::new()); Ok(()) } @@ -687,7 +565,6 @@ impl Database { } /// The number of keys in a column (estimated). - /// Does not take into account the unflushed data. pub fn num_keys(&self, col: u32) -> io::Result { const ESTIMATE_NUM_KEYS: &str = "rocksdb.estimate-num-keys"; match *self.db.read() { @@ -751,18 +628,10 @@ impl KeyValueDB for Database { Database::get_by_prefix(self, col, prefix) } - fn write_buffered(&self, transaction: DBTransaction) { - Database::write_buffered(self, transaction) - } - fn write(&self, transaction: DBTransaction) -> io::Result<()> { Database::write(self, transaction) } - fn flush(&self) -> io::Result<()> { - Database::flush(self) - } - fn iter<'a>(&'a self, col: u32) -> Box + 'a> { let unboxed = Database::iter(self, col); Box::new(unboxed.into_iter()) @@ -805,13 +674,6 @@ impl KeyValueDB for Database { } } -impl Drop for Database { - fn drop(&mut self) { - // write all buffered changes if we can. 
- let _ = self.flush(); - } -} - #[cfg(test)] mod tests { use super::*; @@ -888,8 +750,6 @@ mod tests { } db.write(batch).unwrap(); - db.flush().unwrap(); - { let db = db.db.read(); db.as_ref().map(|db| { diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs index ca82dc8cd..35de99239 100644 --- a/kvdb-shared-tests/src/lib.rs +++ b/kvdb-shared-tests/src/lib.rs @@ -17,7 +17,7 @@ pub fn test_put_and_get(db: &dyn KeyValueDB) -> io::Result<()> { let mut transaction = db.transaction(); transaction.put(0, key1, b"horse"); - db.write_buffered(transaction); + db.write(transaction)?; assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); Ok(()) } @@ -28,12 +28,12 @@ pub fn test_delete_and_get(db: &dyn KeyValueDB) -> io::Result<()> { let mut transaction = db.transaction(); transaction.put(0, key1, b"horse"); - db.write_buffered(transaction); + db.write(transaction)?; assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); let mut transaction = db.transaction(); transaction.delete(0, key1); - db.write_buffered(transaction); + db.write(transaction)?; assert!(db.get(0, key1)?.is_none()); Ok(()) } @@ -49,7 +49,7 @@ pub fn test_get_fails_with_non_existing_column(db: &dyn KeyValueDB) -> io::Resul pub fn test_write_clears_buffered_ops(db: &dyn KeyValueDB) -> io::Result<()> { let mut batch = db.transaction(); batch.put(0, b"foo", b"bar"); - db.write_buffered(batch); + db.write(batch)?; assert_eq!(db.get(0, b"foo")?.unwrap(), b"bar"); @@ -69,7 +69,7 @@ pub fn test_iter(db: &dyn KeyValueDB) -> io::Result<()> { let mut transaction = db.transaction(); transaction.put(0, key1, key1); transaction.put(0, key2, key2); - db.write_buffered(transaction); + db.write(transaction)?; let contents: Vec<_> = db.iter(0).into_iter().collect(); assert_eq!(contents.len(), 2); @@ -224,11 +224,10 @@ pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { let mut transaction = db.transaction(); transaction.put(0, key1, b"horse"); transaction.delete(0, key3); - db.write_buffered(transaction); 
+ db.write(transaction)?; assert!(db.get(0, key3)?.is_none()); assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); - db.flush()?; assert!(db.get(0, key3)?.is_none()); assert_eq!(&*db.get(0, key1)?.unwrap(), b"horse"); Ok(()) diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index f8e952bf3..97fa61f4f 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) ## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index 5d9878ec2..f0179c085 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -71,7 +71,7 @@ impl Database { txn.put_vec(column, key.as_ref(), value); } // write each column into memory - in_memory.write_buffered(txn); + in_memory.write(txn).expect("writing in memory always succeeds; qed"); } Ok(Database { name: name_clone, version, columns, in_memory, indexed_db: inner }) } @@ -102,13 +102,9 @@ impl KeyValueDB for Database { self.in_memory.get_by_prefix(col, prefix) } - fn write_buffered(&self, transaction: DBTransaction) { + fn write(&self, transaction: DBTransaction) -> io::Result<()> { let _ = indexed_db::idb_commit_transaction(&*self.indexed_db, &transaction, self.columns); - self.in_memory.write_buffered(transaction); - } - - fn flush(&self) -> io::Result<()> { - Ok(()) + self.in_memory.write(transaction) } // NOTE: clones the whole db diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index 81a765a05..43c7ce83c 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -66,7 +66,7 @@ async fn reopen_the_database_with_more_columns() { // Write a value into the database let mut batch = db.transaction(); 
batch.put(0, b"hello", b"world"); - db.write_buffered(batch); + db.write(batch).unwrap(); assert_eq!(db.get(0, b"hello").unwrap().unwrap(), b"world"); diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 18f484117..6cfa99a6e 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Removed `write_buffered` and `flush` methods. [#313](https://github.com/paritytech/parity-common/pull/313) ## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 723714de5..0fd28d9b8 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -84,19 +84,10 @@ impl DBTransaction { /// Generic key-value database. /// -/// This makes a distinction between "buffered" and "flushed" values. Values which have been -/// written can always be read, but may be present in an in-memory buffer. Values which have -/// been flushed have been moved to backing storage, like a RocksDB instance. There are certain -/// operations which are only guaranteed to operate on flushed data and not buffered, -/// although implementations may differ in this regard. -/// -/// The contents of an interior buffer may be explicitly flushed using the `flush` method. -/// -/// The `KeyValueDB` also deals in "column families", which can be thought of as distinct +/// The `KeyValueDB` deals with "column families", which can be thought of as distinct /// stores within a database. Keys written in one column family will not be accessible from /// any other. The number of column families must be specified at initialization, with a -/// differing interface for each database. The `None` argument in place of a column index -/// is always supported. +/// differing interface for each database. 
/// /// The API laid out here, along with the `Sync` bound implies interior synchronization for /// implementation. @@ -109,25 +100,16 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { /// Get a value by key. fn get(&self, col: u32, key: &[u8]) -> io::Result>; - /// Get a value by partial key. Only works for flushed data. + /// Get the first value matching the given prefix. fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option>; - /// Write a transaction of changes to the buffer. - fn write_buffered(&self, transaction: DBTransaction); - /// Write a transaction of changes to the backing store. - fn write(&self, transaction: DBTransaction) -> io::Result<()> { - self.write_buffered(transaction); - self.flush() - } - - /// Flush all buffered data. - fn flush(&self) -> io::Result<()>; + fn write(&self, transaction: DBTransaction) -> io::Result<()>; - /// Iterate over flushed data for a given column. + /// Iterate over the data for a given column. fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a>; - /// Iterate over flushed data for a given column, starting from a given prefix. + /// Iterate over the data for a given column, starting from a given prefix. fn iter_from_prefix<'a>( &'a self, col: u32, From dd89c9a00b82b9e5c4e18964fd452c0dca5eed3c Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 27 Mar 2020 20:11:56 +0100 Subject: [PATCH 108/359] Delete by prefix operator in kvdb (#360) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Switch from `parity-rocksdb` to upstream `rust-rocksdb` * Update to latest rocksdb * Delete prefix as range except when start is full of 255 (corner case). * forcing ignore lifetime (not more unsafe than before, columnfamily is probably not an issue, dbiterator is one as it will fail on close db). * temporarily remove path deps for easier patch usage. * being a bit more exhaustive * Tests for delete prefix, fix rocksdb full column delete. 
* revert util-mem * update versionning. * better end prefix from review * revert test error check * Update kvdb-memorydb/src/lib.rs Co-Authored-By: Andronik Ordian * Update kvdb-shared-tests/src/lib.rs Co-Authored-By: Andronik Ordian * Update kvdb/src/lib.rs Co-Authored-By: Andronik Ordian * Update kvdb/src/lib.rs Co-Authored-By: Andronik Ordian * applying suggestions, and remove delete by prefix method. * io stats num column * end_prefix test * format * Redundant delete. * Update kvdb/src/lib.rs Co-Authored-By: David * Update kvdb/src/lib.rs Co-Authored-By: David * Documentation fix and additional test case in end_prefix_test * Doc. * doc Co-authored-by: Bastian Köcher Co-authored-by: Benjamin Kampmann Co-authored-by: Andronik Ordian Co-authored-by: David --- kvdb-memorydb/Cargo.toml | 2 +- kvdb-memorydb/src/lib.rs | 21 +++++++++++++ kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/src/lib.rs | 19 +++++++++++- kvdb-shared-tests/Cargo.toml | 2 +- kvdb-shared-tests/src/lib.rs | 60 ++++++++++++++++++++++++++++++++++++ kvdb-web/Cargo.toml | 3 +- kvdb-web/src/indexed_db.rs | 15 ++++++++- kvdb-web/tests/indexed_db.rs | 6 ++++ kvdb/src/lib.rs | 44 ++++++++++++++++++++++++++ 10 files changed, 168 insertions(+), 6 deletions(-) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 0ea7119a8..290727c75 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -13,4 +13,4 @@ parking_lot = "0.10.0" kvdb = { version = "0.5", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.3" } diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 6977fbf4e..c37ed821f 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -67,6 +67,21 @@ impl KeyValueDB for InMemory { col.remove(&*key); } } + DBOp::DeletePrefix { col, prefix } => { + if let Some(col) = columns.get_mut(&col) { + use std::ops::Bound; + if 
prefix.is_empty() { + col.clear(); + } else { + let start_range = Bound::Included(prefix.to_vec()); + let end_range = Bound::Excluded(kvdb::end_prefix(&prefix[..])); + let keys: Vec<_> = col.range((start_range, end_range)).map(|(k, _)| k.clone()).collect(); + for key in keys.into_iter() { + col.remove(&key[..]); + } + } + } + } } } Ok(()) @@ -127,6 +142,12 @@ mod tests { st::test_delete_and_get(&db) } + #[test] + fn delete_prefix() -> io::Result<()> { + let db = create(st::DELETE_PREFIX_NUM_COLUMNS); + st::test_delete_prefix(&db) + } + #[test] fn iter() -> io::Result<()> { let db = create(1); diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 315b4d864..77812b6fe 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-featur alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.3" } rand = "0.7.2" tempdir = "0.3.7" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 8755f207f..54a81140c 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -443,6 +443,17 @@ impl Database { stats_total_bytes += key.len(); batch.delete_cf(cf, &key).map_err(other_io_err)? } + DBOp::DeletePrefix { col: _, prefix } => { + if prefix.len() > 0 { + let end_range = kvdb::end_prefix(&prefix[..]); + batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; + } else { + // Deletes all values in the column. 
+ let end_range = &[u8::max_value()]; + batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; + batch.delete_cf(cf, &end_range[..]).map_err(other_io_err)?; + } + } }; } self.stats.tally_bytes_written(stats_total_bytes as u64); @@ -705,6 +716,12 @@ mod tests { st::test_delete_and_get(&db) } + #[test] + fn delete_prefix() -> io::Result<()> { + let db = create(st::DELETE_PREFIX_NUM_COLUMNS)?; + st::test_delete_prefix(&db) + } + #[test] fn iter() -> io::Result<()> { let db = create(1)?; @@ -725,7 +742,7 @@ mod tests { #[test] fn stats() -> io::Result<()> { - let db = create(3)?; + let db = create(st::IOSTATS_NUM_COLUMNS)?; st::test_io_stats(&db) } diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index c5987c5c7..e6f0876c7 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-shared-tests" -version = "0.2.0" +version = "0.3.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs index 35de99239..2ffcc07ef 100644 --- a/kvdb-shared-tests/src/lib.rs +++ b/kvdb-shared-tests/src/lib.rs @@ -126,6 +126,9 @@ pub fn test_iter_from_prefix(db: &dyn KeyValueDB) -> io::Result<()> { Ok(()) } +/// The number of columns required to run `test_io_stats`. +pub const IOSTATS_NUM_COLUMNS: u32 = 3; + /// A test for `KeyValueDB::io_stats`. /// Assumes that the `db` has at least 3 columns. pub fn test_io_stats(db: &dyn KeyValueDB) -> io::Result<()> { @@ -171,6 +174,63 @@ pub fn test_io_stats(db: &dyn KeyValueDB) -> io::Result<()> { Ok(()) } +/// The number of columns required to run `test_delete_prefix`. +pub const DELETE_PREFIX_NUM_COLUMNS: u32 = 5; + +/// A test for `KeyValueDB::delete_prefix`. 
+pub fn test_delete_prefix(db: &dyn KeyValueDB) -> io::Result<()> { + let keys = [ + &[][..], + &[0u8][..], + &[0, 1][..], + &[1][..], + &[1, 0][..], + &[1, 255][..], + &[1, 255, 255][..], + &[2][..], + &[2, 0][..], + &[2, 255][..], + ]; + let init_db = |ix: u32| -> io::Result<()> { + let mut batch = db.transaction(); + for (i, key) in keys.iter().enumerate() { + batch.put(ix, key, &[i as u8]); + } + db.write(batch)?; + Ok(()) + }; + let check_db = |ix: u32, content: [bool; 10]| -> io::Result<()> { + let mut state = [true; 10]; + for (c, key) in keys.iter().enumerate() { + state[c] = db.get(ix, key)?.is_some(); + } + assert_eq!(state, content, "at {}", ix); + Ok(()) + }; + let tests: [_; DELETE_PREFIX_NUM_COLUMNS as usize] = [ + // standard + (&[1u8][..], [true, true, true, false, false, false, false, true, true, true]), + // edge + (&[1u8, 255, 255][..], [true, true, true, true, true, true, false, true, true, true]), + // none 1 + (&[1, 2][..], [true, true, true, true, true, true, true, true, true, true]), + // none 2 + (&[8][..], [true, true, true, true, true, true, true, true, true, true]), + // all + (&[][..], [false, false, false, false, false, false, false, false, false, false]), + ]; + for (ix, test) in tests.iter().enumerate() { + let ix = ix as u32; + init_db(ix)?; + let mut batch = db.transaction(); + batch.delete_prefix(ix, test.0); + db.write(batch)?; + check_db(ix, test.1)?; + } + + Ok(()) +} + /// A complex test. 
pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { let key1 = b"02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc"; diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 331ef1fa6..2a9a681d6 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -34,11 +34,12 @@ features = [ 'EventTarget', 'IdbCursor', 'IdbCursorWithValue', + 'IdbKeyRange', 'DomStringList', ] [dev-dependencies] console_log = "0.1.2" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.2" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.3" } wasm-bindgen-test = "0.3.4" wasm-bindgen-futures = "0.4.4" diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs index 4593e3007..15a7713e6 100644 --- a/kvdb-web/src/indexed_db.rs +++ b/kvdb-web/src/indexed_db.rs @@ -10,7 +10,7 @@ use js_sys::{Array, ArrayBuffer, Uint8Array}; use wasm_bindgen::{closure::Closure, JsCast, JsValue}; -use web_sys::{Event, IdbCursorWithValue, IdbDatabase, IdbOpenDbRequest, IdbRequest, IdbTransactionMode}; +use web_sys::{Event, IdbCursorWithValue, IdbDatabase, IdbKeyRange, IdbOpenDbRequest, IdbRequest, IdbTransactionMode}; use futures::channel; use futures::prelude::*; @@ -157,6 +157,19 @@ pub fn idb_commit_transaction(idb: &IdbDatabase, txn: &DBTransaction, columns: u warn!("error deleting key from col_{}: {:?}", column, err); } } + DBOp::DeletePrefix { col, prefix } => { + let column = *col as usize; + // Convert rust bytes to js arrays + let prefix_js_start = Uint8Array::from(prefix.as_ref()); + let prefix_js_end = Uint8Array::from(prefix.as_ref()); + + let range = IdbKeyRange::bound(prefix_js_start.as_ref(), prefix_js_end.as_ref()) + .expect("Starting and ending at same value is valid bound; qed"); + let res = object_stores[column].delete(range.as_ref()); + if let Err(err) = res { + warn!("error deleting prefix from col_{}: {:?}", column, err); + } + } } } diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index 
43c7ce83c..e32d5ea18 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -39,6 +39,12 @@ async fn delete_and_get() { st::test_delete_and_get(&db).unwrap() } +#[wasm_bindgen_test] +async fn delete_prefix() { + let db = open_db(st::DELETE_PREFIX_NUM_COLUMNS, "delete_prefix").await; + st::test_delete_prefix(&db).unwrap() +} + #[wasm_bindgen_test] async fn iter() { let db = open_db(1, "iter").await; diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 0fd28d9b8..695519f43 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -35,6 +35,7 @@ pub struct DBTransaction { pub enum DBOp { Insert { col: u32, key: DBKey, value: DBValue }, Delete { col: u32, key: DBKey }, + DeletePrefix { col: u32, prefix: DBKey }, } impl DBOp { @@ -43,6 +44,7 @@ impl DBOp { match *self { DBOp::Insert { ref key, .. } => key, DBOp::Delete { ref key, .. } => key, + DBOp::DeletePrefix { ref prefix, .. } => prefix, } } @@ -51,6 +53,7 @@ impl DBOp { match *self { DBOp::Insert { col, .. } => col, DBOp::Delete { col, .. } => col, + DBOp::DeletePrefix { col, .. } => col, } } } @@ -80,6 +83,13 @@ impl DBTransaction { pub fn delete(&mut self, col: u32, key: &[u8]) { self.ops.push(DBOp::Delete { col, key: DBKey::from_slice(key) }); } + + /// Delete all values with the given key prefix. + /// Using an empty prefix here will remove all keys + /// (all keys starts with the empty prefix). + pub fn delete_prefix(&mut self, col: u32, prefix: &[u8]) { + self.ops.push(DBOp::DeletePrefix { col, prefix: DBKey::from_slice(prefix) }); + } } /// Generic key-value database. @@ -129,3 +139,37 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { IoStats::empty() } } + +/// For a given start prefix (inclusive), returns the correct end prefix (non-inclusive). +/// This assumes the key bytes are ordered in lexicographical order. 
+pub fn end_prefix(prefix: &[u8]) -> Vec { + let mut end_range = prefix.to_vec(); + while let Some(0xff) = end_range.last() { + end_range.pop(); + } + if let Some(byte) = end_range.last_mut() { + *byte += 1; + } + end_range +} + +#[cfg(test)] +mod test { + use super::end_prefix; + + #[test] + fn end_prefix_test() { + assert_eq!(end_prefix(&[5, 6, 7]), vec![5, 6, 8]); + assert_eq!(end_prefix(&[5, 6, 255]), vec![5, 7]); + // This is not equal as the result is before start. + assert_ne!(end_prefix(&[5, 255, 255]), vec![5, 255]); + // This is equal ([5, 255] will not be deleted because + // it is before start). + assert_eq!(end_prefix(&[5, 255, 255]), vec![6]); + assert_eq!(end_prefix(&[255, 255, 255]), vec![]); + + assert_eq!(end_prefix(&[0x00, 0xff]), vec![0x01]); + assert_eq!(end_prefix(&[0xff]), vec![]); + assert_eq!(end_prefix(&[]), vec![]); + } +} From b87aee1d54bb372471b92e0d25970bcdc9d6e002 Mon Sep 17 00:00:00 2001 From: David Date: Fri, 10 Apr 2020 12:43:21 +0200 Subject: [PATCH 109/359] Allow pubkey recovery for all-zero messages (#369) * Allow pubkey recovery for all-zero messages After https://github.com/openethereum/openethereum/pull/11406 it is no longer possible to do public key recovery from messages that are all-zero. This creates issues when using the `ecrecover` builtin because externally produced signatures may well provide a message (i.e. a preimage) that is all-zeroes. This works around the problem at the cost of cloning the incoming message and creating a `ZeroesAllowedMessage` wrapper around it. The `ZeroesAllowedMessage` implements the `ThirtyTwoByteHash` trait from `rust-secp256k1` which circumvents the zero-check. In a follow-up PR we'll likely change the interface of `recover()` to take a `ZeroesAllowedMessage` directly, thus removing the unneeded clone. 
* Inner doesn't need to be pub * Update parity-crypto/src/publickey/ecdsa_signature.rs Co-Authored-By: Andronik Ordian * Docs and review grumbles * Add `recover_allowing_all_zero_message()` Revert `recover()` to previous behaviour: no zero-messages allowed Docs and cleanup * Obey the fmt Co-authored-by: Andronik Ordian --- .../src/publickey/ecdsa_signature.rs | 62 +++++++++++++++++-- parity-crypto/src/publickey/mod.rs | 20 +++++- 2 files changed, 75 insertions(+), 7 deletions(-) diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs index 6853b1586..7968967a7 100644 --- a/parity-crypto/src/publickey/ecdsa_signature.rs +++ b/parity-crypto/src/publickey/ecdsa_signature.rs @@ -8,7 +8,7 @@ //! Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm -use super::{public_to_address, Address, Error, Message, Public, Secret, SECP256K1}; +use super::{public_to_address, Address, Error, Message, Public, Secret, ZeroesAllowedMessage, SECP256K1}; use ethereum_types::{H256, H520}; use rustc_hex::{FromHex, ToHex}; use secp256k1::key::{PublicKey, SecretKey}; @@ -246,22 +246,56 @@ pub fn verify_address(address: &Address, signature: &Signature, message: &Messag /// Recovers the public key from the signature for the message pub fn recover(signature: &Signature, message: &Message) -> Result { - let context = &SECP256K1; let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let pubkey = context.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; + let pubkey = &SECP256K1.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; let serialized = pubkey.serialize_uncompressed(); - let mut public = Public::default(); public.as_bytes_mut().copy_from_slice(&serialized[1..65]); Ok(public) } +/// Recovers the public key from the signature for the given message. 
+/// This version of `recover()` allows for all-zero messages, which is necessary +/// for ethereum but is otherwise highly discouraged. Use with caution. +pub fn recover_allowing_all_zero_message( + signature: &Signature, + message: ZeroesAllowedMessage, +) -> Result { + let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; + let pubkey = &SECP256K1.recover(&message.into(), &rsig)?; + let serialized = pubkey.serialize_uncompressed(); + let mut public = Public::zero(); + public.as_bytes_mut().copy_from_slice(&serialized[1..65]); + Ok(public) +} + #[cfg(test)] mod tests { - use super::super::{Generator, Message, Random}; - use super::{recover, sign, verify_address, verify_public, Signature}; + use super::super::{Generator, Message, Random, SECP256K1}; + use super::{ + recover, recover_allowing_all_zero_message, sign, verify_address, verify_public, Secret, Signature, + ZeroesAllowedMessage, + }; + use secp256k1::SecretKey; use std::str::FromStr; + // Copy of `sign()` that allows signing all-zero Messages. + // Note: this is for *tests* only. DO NOT USE UNLESS YOU NEED IT. + fn sign_zero_message(secret: &Secret) -> Signature { + let context = &SECP256K1; + let sec = SecretKey::from_slice(secret.as_ref()).unwrap(); + // force an all-zero message into a secp `Message` bypassing the validity check. 
+ let zero_msg = ZeroesAllowedMessage(Message::zero()); + let s = context.sign_recoverable(&zero_msg.into(), &sec); + let (rec_id, data) = s.serialize_compact(); + let mut data_arr = [0; 65]; + + // no need to check if s is low, it always is + data_arr[0..64].copy_from_slice(&data[0..64]); + data_arr[64] = rec_id.to_i32() as u8; + Signature(data_arr) + } + #[test] fn vrs_conversion() { // given @@ -295,6 +329,22 @@ mod tests { assert_eq!(keypair.public(), &recover(&signature, &message).unwrap()); } + #[test] + fn sign_and_recover_public_fails_with_zeroed_messages() { + let keypair = Random.generate(); + let signature = sign_zero_message(keypair.secret()); + let zero_message = Message::zero(); + assert!(&recover(&signature, &zero_message).is_err()); + } + + #[test] + fn recover_allowing_all_zero_message_can_recover_from_all_zero_messages() { + let keypair = Random.generate(); + let signature = sign_zero_message(keypair.secret()); + let zero_message = ZeroesAllowedMessage(Message::zero()); + assert_eq!(keypair.public(), &recover_allowing_all_zero_message(&signature, zero_message).unwrap()) + } + #[test] fn sign_and_verify_public() { let keypair = Random.generate(); diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index 294d67b38..53dd12209 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -20,7 +20,9 @@ pub mod ecdh; pub mod ecies; pub mod error; -pub use self::ecdsa_signature::{recover, sign, verify_address, verify_public, Signature}; +pub use self::ecdsa_signature::{ + recover, recover_allowing_all_zero_message, sign, verify_address, verify_public, Signature, +}; pub use self::error::Error; pub use self::extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; pub use self::keypair::{public_to_address, KeyPair}; @@ -33,6 +35,22 @@ use lazy_static::lazy_static; pub use ethereum_types::{Address, Public}; pub type Message = H256; +use 
secp256k1::ThirtyTwoByteHash;
+
+/// In ethereum we allow public key recovery from a signature + message pair
+/// where the message is all-zeroes. This conflicts with the best practice of
+/// not allowing such values and so in order to avoid breaking consensus we need
+/// this to work around it. The `ZeroesAllowedMessage` wraps an `H256` that can be
+/// converted to a `[u8; 32]` which in turn can be cast to a
+/// `secp256k1::Message` by the `ThirtyTwoByteHash` and satisfy the API for
+/// `recover()`.
+pub struct ZeroesAllowedMessage(H256);
+impl ThirtyTwoByteHash for ZeroesAllowedMessage {
+ fn into_32(self) -> [u8; 32] {
+ self.0.to_fixed_bytes()
+ }
+}
+
 /// The number -1 encoded as a secret key
 const MINUS_ONE_KEY: &'static [u8] = &[
 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc,
From 0424206b9f022d4befaa71d03ddbc65859edcd9b Mon Sep 17 00:00:00 2001
From: Andronik Ordian 
Date: Fri, 10 Apr 2020 16:57:38 +0200
Subject: [PATCH 110/359] keccak-hash: add keccak256_range and keccak512_range functions (#370)

* keccak-hash: add keccak256_range and keccak512_range functions

* keccak-hash: prep for release

* keccak-hash: update the date in changelog

* keccak-hash: more docs

* Update keccak-hash/src/lib.rs

Co-Authored-By: David 

Co-authored-by: David 
---
 keccak-hash/CHANGELOG.md |  6 ++++-
 keccak-hash/src/lib.rs   | 51 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md
index 13f73c4a9..e8580f5d2 100644
--- a/keccak-hash/CHANGELOG.md
+++ b/keccak-hash/CHANGELOG.md
@@ -6,9 +6,13 @@ The format is based on [Keep a Changelog].
 
 ## [Unreleased]
 
-## [0.4.2] - 2020-03-16
+## [0.5.1] - 2020-04-10
+- Added `keccak256_range` and `keccak512_range` functions. [#370](https://github.com/paritytech/parity-common/pull/370)
+
+## [0.5.0] - 2020-03-16
 - License changed from GPL3 to dual MIT/Apache2.
[#342](https://github.com/paritytech/parity-common/pull/342) - Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) +- Updated tiny-keccak. [#260](https://github.com/paritytech/parity-common/pull/260) ## [0.4.1] - 2019-10-24 ### Dependencies diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index e9f410672..dbad92af5 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -45,6 +45,32 @@ pub fn keccak256(data: &mut [u8]) { keccak256.finalize(data); } +/// Computes in-place keccak256 hash of `data[range]`. +/// +/// The `range` argument specifies a subslice of `data` in bytes to be hashed. +/// The resulting hash will be written back to `data`. +/// # Panics +/// +/// If `range` is out of bounds. +/// +/// # Example +/// +/// ``` +/// let mut data = [1u8; 32]; +/// // Hash the first 8 bytes of `data` and write the result, 32 bytes, to `data`. +/// keccak_hash::keccak256_range(&mut data, 0..8); +/// let expected = [ +/// 0x54, 0x84, 0x4f, 0x69, 0xb4, 0xda, 0x4b, 0xb4, 0xa9, 0x9f, 0x24, 0x59, 0xb5, 0x11, 0xd4, 0x42, +/// 0xcc, 0x5b, 0xd2, 0xfd, 0xf4, 0xc3, 0x54, 0xd2, 0x07, 0xbb, 0x13, 0x08, 0x94, 0x43, 0xaf, 0x68, +/// ]; +/// assert_eq!(&data, &expected); +/// ``` +pub fn keccak256_range(data: &mut [u8], range: core::ops::Range) { + let mut keccak256 = Keccak::v256(); + keccak256.update(&data[range]); + keccak256.finalize(data); +} + /// Computes in-place keccak512 hash of `data`. pub fn keccak512(data: &mut [u8]) { let mut keccak512 = Keccak::v512(); @@ -52,6 +78,31 @@ pub fn keccak512(data: &mut [u8]) { keccak512.finalize(data); } +/// Computes in-place keccak512 hash of `data[range]`. +/// +/// The `range` argument specifies a subslice of `data` in bytes to be hashed. +/// The resulting hash will be written back to `data`. +/// # Panics +/// +/// If `range` is out of bounds. 
+/// +/// # Example +/// +/// ``` +/// let mut data = [1u8; 64]; +/// keccak_hash::keccak512_range(&mut data, 0..8); +/// let expected = [ +/// 0x90, 0x45, 0xc5, 0x9e, 0xd3, 0x0e, 0x1f, 0x42, 0xac, 0x35, 0xcc, 0xc9, 0x55, 0x7c, 0x77, 0x17, +/// 0xc8, 0x89, 0x3a, 0x77, 0x6c, 0xea, 0x2e, 0xf3, 0x88, 0xea, 0xe5, 0xc0, 0xea, 0x40, 0x26, 0x64, +/// ]; +/// assert_eq!(&data[..32], &expected); +/// ``` +pub fn keccak512_range(data: &mut [u8], range: core::ops::Range) { + let mut keccak512 = Keccak::v512(); + keccak512.update(&data[range]); + keccak512.finalize(data); +} + pub fn keccak_256(input: &[u8], output: &mut [u8]) { write_keccak(input, output); } From 53b47b778d5116397c51300f5ddb1030a83dc510 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 10 Apr 2020 17:27:04 +0200 Subject: [PATCH 111/359] keccak-hash: bump version to 0.5.1 (#371) --- keccak-hash/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index ea36f7288..54c1c0439 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.5.0" +version = "0.5.1" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." 
authors = ["Parity Technologies "]
repository = "https://github.com/paritytech/parity-common"

From 13ae5bebe24d5c103340a5394da4a3e1f2109203 Mon Sep 17 00:00:00 2001
From: David 
Date: Sat, 11 Apr 2020 22:57:34 +0200
Subject: [PATCH 112/359] [parity-crypto] Release 0.6.1 (#373)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

…and fix construction of `ZeroesAllowedMessage`
---
 parity-crypto/CHANGELOG.md         | 3 +++
 parity-crypto/Cargo.toml           | 2 +-
 parity-crypto/src/publickey/mod.rs | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md
index 14290d54d..08cd46532 100644
--- a/parity-crypto/CHANGELOG.md
+++ b/parity-crypto/CHANGELOG.md
@@ -6,6 +6,9 @@ The format is based on [Keep a Changelog].
 
 ## [Unreleased]
 
+## [0.6.1] - 2020-04-11
+- Add `recover_allowing_all_zero_message()` and `ZeroesAllowedMessage` to accommodate ethereum's `ecrecover` builtin. [#369](https://github.com/paritytech/parity-common/pull/369)
+
 ## [0.6.0] - 2020-03-16
 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342)
 - Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361)
diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml
index 763fc4b74..1b1944ad3 100644
--- a/parity-crypto/Cargo.toml
+++ b/parity-crypto/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "parity-crypto"
-version = "0.6.0"
+version = "0.6.1"
 authors = ["Parity Technologies "]
 repository = "https://github.com/paritytech/parity-common"
 description = "Crypto utils used by ethstore and network."
diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index 53dd12209..54d3ffe79 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -44,7 +44,7 @@ use secp256k1::ThirtyTwoByteHash; /// converted to a `[u8; 32]` which in turn can be cast to a /// `secp256k1::Message` by the `ThirtyTwoByteHash` and satisfy the API for /// `recover()`. -pub struct ZeroesAllowedMessage(H256); +pub struct ZeroesAllowedMessage(pub H256); impl ThirtyTwoByteHash for ZeroesAllowedMessage { fn into_32(self) -> [u8; 32] { self.0.to_fixed_bytes() From 0bba5668615e88aec23d2d2e22f7b5bae6e0932b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Sat, 11 Apr 2020 22:59:00 +0200 Subject: [PATCH 113/359] keccak-hash: fix bench and add one for range (#372) * keccak-hash: fix bench and add one for range * fmt --- .travis.yml | 2 +- keccak-hash/Cargo.toml | 4 ++++ keccak-hash/benches/keccak_256.rs | 18 ++++++++++++++++-- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 19c6b18ef..53e552f6d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -40,7 +40,7 @@ script: cargo fmt -- --check; fi - cargo check --workspace --tests --benches - - cargo test --all --exclude uint --exclude fixed-hash --exclude parity-crypto + - cargo test --workspace --exclude uint --exclude fixed-hash --exclude parity-crypto - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cd contract-address/ && cargo test --features=external_doc && cd ..; fi diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 54c1c0439..c663c66c5 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -19,3 +19,7 @@ criterion = "0.3.0" [features] default = ["std"] std = [] + +[[bench]] +name = "keccak_256" +harness = false diff --git a/keccak-hash/benches/keccak_256.rs b/keccak-hash/benches/keccak_256.rs index 97e9ee13d..3a28f993a 100644 --- a/keccak-hash/benches/keccak_256.rs +++ b/keccak-hash/benches/keccak_256.rs @@ 
-22,12 +22,26 @@ pub fn keccak_256_with_empty_input(c: &mut Criterion) { } pub fn keccak_256_with_typical_input(c: &mut Criterion) { - let data: Vec = From::from("some medium length string with important information"); - c.bench_function("keccak_256_with_typical_input", |b| { + let mut data: Vec = From::from("some medium length string with important information"); + let len = data.len(); + let mut group = c.benchmark_group("keccak_256_with_typical_input"); + group.bench_function("regular", |b| { b.iter(|| { let _out = keccak(black_box(&data)); }) }); + group.bench_function("inplace", |b| { + b.iter(|| { + keccak_hash::keccak256(black_box(&mut data[..])); + }) + }); + group.bench_function("inplace_range", |b| { + b.iter(|| { + keccak_hash::keccak256_range(black_box(&mut data[..]), 0..len); + }) + }); + + group.finish(); } pub fn keccak_256_with_large_input(c: &mut Criterion) { From ef69ef1dcda806a45b1988605bf71843dd032cc1 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 15 Apr 2020 00:11:34 +0200 Subject: [PATCH 114/359] parity-util-mem: fix for windows (#375) * parity-util-mem: try to fix compilation on windows * appveyor: test parity-util-mem with no-std * appveyor: test parity-util-mem with default features * fmt --- appveyor.yml | 2 ++ parity-util-mem/src/allocators.rs | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index bc9c9bf75..2807de0d7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -26,4 +26,6 @@ test_script: - cd fixed-hash/ && cargo test --all-features && cd .. - cd uint/ && cargo test --features=std,quickcheck --release && cd .. - cd plain_hasher/ && cargo test --no-default-features && cd .. + - cd parity-util-mem/ && cargo test --no-default-features && cd .. - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. + - cd parity-util-mem/ && cargo test && cd .. 
diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index 8d8bb18db..fca674ebe 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -66,6 +66,7 @@ mod usable_size { } else if #[cfg(target_os = "windows")] { use winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; + use winapi::ctypes::c_void as winapi_c_void; /// Get the size of a heap block. /// Call windows allocator through `winapi` crate @@ -73,11 +74,11 @@ mod usable_size { let heap = GetProcessHeap(); - if HeapValidate(heap, 0, ptr) == 0 { + if HeapValidate(heap, 0, ptr as *const winapi_c_void) == 0 { ptr = *(ptr as *const *const c_void).offset(-1); } - HeapSize(heap, 0, ptr) as usize + HeapSize(heap, 0, ptr as *const winapi_c_void) as usize } } else if #[cfg(feature = "jemalloc-global")] { From b99e466e60d19c298b54f593bbd2266edca7f61f Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 15 Apr 2020 10:41:19 +0200 Subject: [PATCH 115/359] bump parity-util-mem (#376) * parity-util-mem: bump minor version * parity-util-mem: update the changelog * parity-util-mem: update README --- parity-util-mem/CHANGELOG.md | 4 +++- parity-util-mem/Cargo.toml | 2 +- parity-util-mem/README.md | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index ec8ef91f9..a23c2164d 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,7 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -### Breaking + +## [0.6.1] - 2020-04-15 +- Fix compilation on Windows for no-std. [#375](https://github.com/paritytech/parity-common/pull/375) - Prevent multiple versions from being linked into the same program. 
[#363](https://github.com/paritytech/parity-common/pull/363) ## [0.6.0] - 2020-03-13 diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 700061c32..97e20cf0e 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.6.0" +version = "0.6.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" diff --git a/parity-util-mem/README.md b/parity-util-mem/README.md index a6f15b90b..8b7dd7bd1 100644 --- a/parity-util-mem/README.md +++ b/parity-util-mem/README.md @@ -9,8 +9,8 @@ it must be the sole place where a global allocator is defined. The only exception to this rule is when used in a `no_std` context or when the `estimate-heapsize` feature is used. Because of that, it must be present in the dependency tree with a single version. -Starting from version 0.7, having duplicate versions of `parity-util-mem` will lead -to a compile-time error. It still will be possible to have 0.6 and 0.7 versions in the same binary though. +Starting from version 0.6.1, having duplicate versions of `parity-util-mem` will lead +to a compile-time error. It still will be possible to have 0.5 and 0.6.1 versions in the same binary though. Unless heeded you risk UB; see discussion in [issue 364]. 
From a52fbeb0c8f0f77b418abb8a70b5ff7e83ce49eb Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 15 Apr 2020 10:45:21 +0200 Subject: [PATCH 116/359] kvdb-rocksdb: optimize and rename iter_from_prefix (#365) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * kvdb: small cleanup * kvdb-rocksdb: rename and optimize iter_with_prefix * fix links in changelogs * apply suggestions from code review * 🤦 * fmt --- kvdb-memorydb/src/lib.rs | 6 +++--- kvdb-rocksdb/CHANGELOG.md | 1 + kvdb-rocksdb/src/iter.rs | 28 +++++++++++++++++++--------- kvdb-rocksdb/src/lib.rs | 36 ++++++++++++++++++++++-------------- kvdb-shared-tests/src/lib.rs | 18 +++++++++--------- kvdb-web/src/lib.rs | 4 ++-- kvdb-web/tests/indexed_db.rs | 6 +++--- kvdb/CHANGELOG.md | 2 ++ kvdb/src/lib.rs | 8 +++++--- 9 files changed, 66 insertions(+), 43 deletions(-) diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index c37ed821f..6a70babea 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -97,7 +97,7 @@ impl KeyValueDB for InMemory { } } - fn iter_from_prefix<'a>( + fn iter_with_prefix<'a>( &'a self, col: u32, prefix: &'a [u8], @@ -155,9 +155,9 @@ mod tests { } #[test] - fn iter_from_prefix() -> io::Result<()> { + fn iter_with_prefix() -> io::Result<()> { let db = create(1); - st::test_iter_from_prefix(&db) + st::test_iter_with_prefix(&db) } #[test] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 43cf44408..cd4838404 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -7,6 +7,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] ### Breaking - Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) +- Rename and optimize prefix iteration. [#365](https://github.com/paritytech/parity-common/pull/365) ## [0.7.0] - 2020-03-16 - Updated dependencies. 
[#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 71482f446..a1ef70a53 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -12,8 +12,7 @@ //! //! Note: this crate does not use "Prefix Seek" mode which means that the prefix iterator //! will return keys not starting with the given prefix as well (as long as `key >= prefix`). -//! To work around this we filter the data returned by rocksdb to ensure that -//! all data yielded by the iterator does start with the given prefix. +//! To work around this we set an upper bound to the prefix successor. //! See https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes for details. use crate::DBAndColumns; @@ -28,6 +27,13 @@ pub type KeyValuePair = (Box<[u8]>, Box<[u8]>); /// Iterator with built-in synchronization. pub struct ReadGuardedIterator<'a, I, T> { inner: OwningHandle>, DerefWrapper>>, + // We store the upper bound here + // to make sure it lives at least as long as the iterator. + // See https://github.com/rust-rocksdb/rust-rocksdb/pull/309. + // TODO: remove this once https://github.com/rust-rocksdb/rust-rocksdb/pull/377 + // is merged and released. + #[allow(dead_code)] + upper_bound_prefix: Option>, } // We can't implement `StableAddress` for a `RwLockReadGuard` @@ -81,8 +87,8 @@ pub trait IterationHandler { /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes a /// reference to a `ReadOptions` to allow configuration of the new iterator (see /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). - /// The iterator starts from the first key having the provided `prefix`. - fn iter_from_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator; + /// The `Iterator` iterates over keys which start with the provided `prefix`. 
+ fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator; } impl<'a, T> ReadGuardedIterator<'a, <&'a T as IterationHandler>::Iterator, T> @@ -92,18 +98,22 @@ where /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, /// where `DBIterator` iterates over all keys. pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32, read_opts: &ReadOptions) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter(col, read_opts)) } + Self { inner: Self::new_inner(read_lock, |db| db.iter(col, read_opts)), upper_bound_prefix: None } } /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, - /// where `DBIterator` iterates over keys >= prefix. - pub fn new_from_prefix( + /// where `DBIterator` iterates over keys which start with the given prefix. + pub fn new_with_prefix( read_lock: RwLockReadGuard<'a, Option>, col: u32, prefix: &[u8], + upper_bound: Box<[u8]>, read_opts: &ReadOptions, ) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter_from_prefix(col, prefix, read_opts)) } + Self { + inner: Self::new_inner(read_lock, |db| db.iter_with_prefix(col, prefix, read_opts)), + upper_bound_prefix: Some(upper_bound), + } } fn new_inner( @@ -126,7 +136,7 @@ impl<'a> IterationHandler for &'a DBAndColumns { .expect("iterator params are valid; qed") } - fn iter_from_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator { + fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator { self.db .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) .expect("iterator params are valid; qed") diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 54a81140c..72c96f540 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -444,13 +444,13 @@ impl Database { batch.delete_cf(cf, &key).map_err(other_io_err)? 
} DBOp::DeletePrefix { col: _, prefix } => { - if prefix.len() > 0 { + if !prefix.is_empty() { let end_range = kvdb::end_prefix(&prefix[..]); batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; } else { // Deletes all values in the column. let end_range = &[u8::max_value()]; - batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; + batch.delete_range_cf(cf, &[][..], &end_range[..]).map_err(other_io_err)?; batch.delete_cf(cf, &end_range[..]).map_err(other_io_err)?; } } @@ -492,7 +492,7 @@ impl Database { /// Get value by partial key. Prefix size should match configured prefix size. pub fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { - self.iter_from_prefix(col, prefix).next().map(|(_, v)| v) + self.iter_with_prefix(col, prefix).next().map(|(_, v)| v) } /// Iterator over the data in the given database column index. @@ -512,18 +512,26 @@ impl Database { /// Iterator over data in the `col` database column index matching the given prefix. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. - fn iter_from_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { + fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { - let guarded = iter::ReadGuardedIterator::new_from_prefix(read_lock, col, prefix, &self.read_opts); + let mut read_opts = ReadOptions::default(); + read_opts.set_verify_checksums(false); + let end_prefix = kvdb::end_prefix(prefix).into_boxed_slice(); + // rocksdb doesn't work with an empty upper bound + if !end_prefix.is_empty() { + // SAFETY: the end_prefix lives as long as the iterator + // See `ReadGuardedIterator` definition for more details. 
+ unsafe { + read_opts.set_iterate_upper_bound(&end_prefix); + } + } + let guarded = iter::ReadGuardedIterator::new_with_prefix(read_lock, col, prefix, end_prefix, &read_opts); Some(guarded) } else { None }; - // We're not using "Prefix Seek" mode, so the iterator will return - // keys not starting with the given prefix as well, - // see https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes - optional.into_iter().flat_map(identity).take_while(move |(k, _)| k.starts_with(prefix)) + optional.into_iter().flat_map(identity) } /// Close the database @@ -648,8 +656,8 @@ impl KeyValueDB for Database { Box::new(unboxed.into_iter()) } - fn iter_from_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> Box + 'a> { - let unboxed = Database::iter_from_prefix(self, col, prefix); + fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> Box + 'a> { + let unboxed = Database::iter_with_prefix(self, col, prefix); Box::new(unboxed.into_iter()) } @@ -729,9 +737,9 @@ mod tests { } #[test] - fn iter_from_prefix() -> io::Result<()> { + fn iter_with_prefix() -> io::Result<()> { let db = create(1)?; - st::test_iter_from_prefix(&db) + st::test_iter_with_prefix(&db) } #[test] @@ -742,7 +750,7 @@ mod tests { #[test] fn stats() -> io::Result<()> { - let db = create(st::IOSTATS_NUM_COLUMNS)?; + let db = create(st::IO_STATS_NUM_COLUMNS)?; st::test_io_stats(&db) } diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs index 2ffcc07ef..76fb00c5c 100644 --- a/kvdb-shared-tests/src/lib.rs +++ b/kvdb-shared-tests/src/lib.rs @@ -80,8 +80,8 @@ pub fn test_iter(db: &dyn KeyValueDB) -> io::Result<()> { Ok(()) } -/// A test for `KeyValueDB::iter_from_prefix`. -pub fn test_iter_from_prefix(db: &dyn KeyValueDB) -> io::Result<()> { +/// A test for `KeyValueDB::iter_with_prefix`. 
+pub fn test_iter_with_prefix(db: &dyn KeyValueDB) -> io::Result<()> { let key1 = b"0"; let key2 = b"ab"; let key3 = b"abc"; @@ -95,7 +95,7 @@ pub fn test_iter_from_prefix(db: &dyn KeyValueDB) -> io::Result<()> { db.write(batch)?; // empty prefix - let contents: Vec<_> = db.iter_from_prefix(0, b"").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"").into_iter().collect(); assert_eq!(contents.len(), 4); assert_eq!(&*contents[0].0, key1); assert_eq!(&*contents[1].0, key2); @@ -103,31 +103,31 @@ pub fn test_iter_from_prefix(db: &dyn KeyValueDB) -> io::Result<()> { assert_eq!(&*contents[3].0, key4); // prefix a - let contents: Vec<_> = db.iter_from_prefix(0, b"a").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"a").into_iter().collect(); assert_eq!(contents.len(), 3); assert_eq!(&*contents[0].0, key2); assert_eq!(&*contents[1].0, key3); assert_eq!(&*contents[2].0, key4); // prefix abc - let contents: Vec<_> = db.iter_from_prefix(0, b"abc").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"abc").into_iter().collect(); assert_eq!(contents.len(), 2); assert_eq!(&*contents[0].0, key3); assert_eq!(&*contents[1].0, key4); // prefix abcde - let contents: Vec<_> = db.iter_from_prefix(0, b"abcde").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"abcde").into_iter().collect(); assert_eq!(contents.len(), 0); // prefix 0 - let contents: Vec<_> = db.iter_from_prefix(0, b"0").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"0").into_iter().collect(); assert_eq!(contents.len(), 1); assert_eq!(&*contents[0].0, key1); Ok(()) } /// The number of columns required to run `test_io_stats`. -pub const IOSTATS_NUM_COLUMNS: u32 = 3; +pub const IO_STATS_NUM_COLUMNS: u32 = 3; /// A test for `KeyValueDB::io_stats`. /// Assumes that the `db` has at least 3 columns. 
@@ -256,7 +256,7 @@ pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { assert_eq!(contents[1].0.to_vec(), key2.to_vec()); assert_eq!(&*contents[1].1, b"dog"); - let mut prefix_iter = db.iter_from_prefix(0, b"04c0"); + let mut prefix_iter = db.iter_with_prefix(0, b"04c0"); assert_eq!(*prefix_iter.next().unwrap().1, b"caterpillar"[..]); assert_eq!(*prefix_iter.next().unwrap().1, b"beef"[..]); assert_eq!(*prefix_iter.next().unwrap().1, b"fish"[..]); diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs index f0179c085..49540e35e 100644 --- a/kvdb-web/src/lib.rs +++ b/kvdb-web/src/lib.rs @@ -113,12 +113,12 @@ impl KeyValueDB for Database { } // NOTE: clones the whole db - fn iter_from_prefix<'a>( + fn iter_with_prefix<'a>( &'a self, col: u32, prefix: &'a [u8], ) -> Box, Box<[u8]>)> + 'a> { - self.in_memory.iter_from_prefix(col, prefix) + self.in_memory.iter_with_prefix(col, prefix) } // NOTE: not supported diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs index e32d5ea18..2becc48f4 100644 --- a/kvdb-web/tests/indexed_db.rs +++ b/kvdb-web/tests/indexed_db.rs @@ -52,9 +52,9 @@ async fn iter() { } #[wasm_bindgen_test] -async fn iter_from_prefix() { - let db = open_db(1, "iter_from_prefix").await; - st::test_iter_from_prefix(&db).unwrap() +async fn iter_with_prefix() { + let db = open_db(1, "iter_with_prefix").await; + st::test_iter_with_prefix(&db).unwrap() } #[wasm_bindgen_test] diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 6cfa99a6e..62771e4dc 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -7,6 +7,8 @@ The format is based on [Keep a Changelog]. ## [Unreleased] ### Breaking - Removed `write_buffered` and `flush` methods. [#313](https://github.com/paritytech/parity-common/pull/313) +- Introduce a new `DeletePrefix` database operation. [#360](https://github.com/paritytech/parity-common/pull/360) +- Rename prefix iteration to `iter_with_prefix`. 
[#365](https://github.com/paritytech/parity-common/pull/365) ## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 695519f43..a9c74e949 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -86,7 +86,7 @@ impl DBTransaction { /// Delete all values with the given key prefix. /// Using an empty prefix here will remove all keys - /// (all keys starts with the empty prefix). + /// (all keys start with the empty prefix). pub fn delete_prefix(&mut self, col: u32, prefix: &[u8]) { self.ops.push(DBOp::DeletePrefix { col, prefix: DBKey::from_slice(prefix) }); } @@ -119,8 +119,9 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { /// Iterate over the data for a given column. fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a>; - /// Iterate over the data for a given column, starting from a given prefix. - fn iter_from_prefix<'a>( + /// Iterate over the data for a given column, returning all key/value pairs + /// where the key starts with the given prefix. 
+ fn iter_with_prefix<'a>( &'a self, col: u32, prefix: &'a [u8], @@ -170,6 +171,7 @@ mod test { assert_eq!(end_prefix(&[0x00, 0xff]), vec![0x01]); assert_eq!(end_prefix(&[0xff]), vec![]); + assert_eq!(end_prefix(b"0"), b"1".to_vec()); assert_eq!(end_prefix(&[]), vec![]); } } From 6bb24f26bc0d33c2d8556ef1048c13210f189a26 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Thu, 23 Apr 2020 23:06:38 +1000 Subject: [PATCH 117/359] Add arbitrary trait implementation (#378) * Add arbitrary trait implementation Signed-off-by: Kirk Baird * Add arbitrary trait to Ethereum Types Signed-off-by: Kirk Baird * Tidy up creation of empty array Signed-off-by: Kirk Baird * Resolve minor issues Signed-off-by: Kirk Baird * Update README.md and fix features Signed-off-by: Kirk Baird * Add arbitrary support for Uint, ethbool and primitive-types Signed-off-by: Kirk Baird * Update add ethbloom/arbitrary Co-Authored-By: Andronik Ordian * Update uint/README.md Co-Authored-By: David * Rewrite arbitrary comments Signed-off-by: Kirk Baird * Update fixed-hash/src/hash.rs Co-Authored-By: David * Update fixed-hash/src/hash.rs Co-Authored-By: David Co-authored-by: Andronik Ordian Co-authored-by: David --- ethbloom/Cargo.toml | 1 + ethereum-types/Cargo.toml | 1 + fixed-hash/Cargo.toml | 1 + fixed-hash/README.md | 2 ++ fixed-hash/src/hash.rs | 37 +++++++++++++++++++++++++++++++++++++ fixed-hash/src/lib.rs | 4 ++++ primitive-types/Cargo.toml | 1 + uint/Cargo.toml | 1 + uint/README.md | 2 ++ uint/src/lib.rs | 4 ++++ uint/src/uint.rs | 24 ++++++++++++++++++++++++ 11 files changed, 78 insertions(+) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 038378a64..1f2de0a62 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -26,6 +26,7 @@ default = ["std", "serialize", "rustc-hex"] std = ["fixed-hash/std", "crunchy/std"] serialize = ["std", "impl-serde"] rustc-hex = ["fixed-hash/rustc-hex"] +arbitrary = ["fixed-hash/arbitrary"] [[bench]] name = "bloom" diff --git a/ethereum-types/Cargo.toml 
b/ethereum-types/Cargo.toml index b04c87bbd..8d9dc0739 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -22,3 +22,4 @@ serde_json = "1.0.41" default = ["std", "serialize"] std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] +arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 93903ef7d..33fd076a0 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -19,6 +19,7 @@ quickcheck = { version = "0.9.0", optional = true } rand = { version = "0.7.2", optional = true, default-features = false } rustc-hex = { version = "2.0.1", optional = true, default-features = false } static_assertions = "1.0.0" +arbitrary = { version = "0.4", optional = true } [dev-dependencies] rand_xorshift = "0.2.0" diff --git a/fixed-hash/README.md b/fixed-hash/README.md index c07db2f23..1974bea8f 100644 --- a/fixed-hash/README.md +++ b/fixed-hash/README.md @@ -64,3 +64,5 @@ fixed-hash = { version = "0.3", default-features = false } - Disabled by default. - `api-dummy`: Generate a dummy hash type for API documentation. - Enabled by default at `docs.rs` +- `arbitrary`: Allow for creation of a hash from random unstructured input. + - Disabled by default. diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index e7fdf0e4c..972b750b4 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -316,6 +316,7 @@ macro_rules! construct_fixed_hash { impl_cmp_for_fixed_hash!($name); impl_rustc_hex_for_fixed_hash!($name); impl_quickcheck_for_fixed_hash!($name); + impl_arbitrary_for_fixed_hash!($name); } } @@ -636,6 +637,42 @@ macro_rules! impl_quickcheck_for_fixed_hash { }; } +// When the `arbitrary` feature is disabled. 
+// +// # Note +// +// Feature guarded macro definitions instead of feature guarded impl blocks +// to work around the problems of introducing `arbitrary` crate feature in +// a user crate. +#[cfg(not(feature = "arbitrary"))] +#[macro_export] +#[doc(hidden)] +macro_rules! impl_arbitrary_for_fixed_hash { + ( $name:ident ) => {}; +} + +// When the `arbitrary` feature is enabled. +// +// # Note +// +// Feature guarded macro definitions instead of feature guarded impl blocks +// to work around the problems of introducing `arbitrary` crate feature in +// a user crate. +#[cfg(feature = "arbitrary")] +#[macro_export] +#[doc(hidden)] +macro_rules! impl_arbitrary_for_fixed_hash { + ( $name:ident ) => { + impl $crate::arbitrary::Arbitrary for $name { + fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { + let mut res = Self::zero(); + u.fill_buffer(&mut res.0)?; + Ok(Self::from(res)) + } + } + }; +} + #[macro_export] #[doc(hidden)] macro_rules! impl_ops_for_hash { diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 748101c9f..228f551e0 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -46,6 +46,10 @@ pub use rand; #[doc(hidden)] pub use quickcheck; +#[cfg(feature = "arbitrary")] +#[doc(hidden)] +pub use arbitrary; + #[macro_use] mod hash; diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 06e6e154d..fcbdc4e63 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -22,3 +22,4 @@ rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde"] codec = ["impl-codec"] rlp = ["impl-rlp"] +arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 257b3fa7f..97d489884 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -16,6 +16,7 @@ qc = { package = "quickcheck", version = "0.9.0", optional = true } rand = { version = "0.7.2", default-features = false, optional = true } rustc-hex = { version = "2.0.1", default-features 
= false } static_assertions = "1.0.0" +arbitrary = { version = "0.4", optional = true } [features] default = ["std"] diff --git a/uint/README.md b/uint/README.md index 557d63991..34006f83d 100644 --- a/uint/README.md +++ b/uint/README.md @@ -69,3 +69,5 @@ see fuzz [README.md](fuzz/README.md) - Enabled by default. - `quickcheck`: Enable quickcheck-style property testing - Use with `cargo test --release --features=quickcheck`. +- `arbitrary`: Allow for creation of an `uint` object from random unstructured input for use with fuzzers that use the `arbitrary` crate. + - Disabled by default. diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 144c53e32..7da1f24a5 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -29,6 +29,10 @@ pub use qc; #[doc(hidden)] pub use rand; +#[cfg(feature = "arbitrary")] +#[doc(hidden)] +pub use arbitrary; + #[doc(hidden)] pub use static_assertions; diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 493e2e21e..26b633416 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1554,6 +1554,7 @@ macro_rules! construct_uint { // `$n_words * 8` because macro expects bytes and // uints use 64 bit (8 byte) words $crate::impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); + $crate::impl_arbitrary_for_uint!($name, ($n_words * 8)); } } @@ -1632,3 +1633,26 @@ macro_rules! impl_quickcheck_arbitrary_for_uint { macro_rules! impl_quickcheck_arbitrary_for_uint { ($uint: ty, $n_bytes: tt) => {}; } + + +#[cfg(feature = "arbitrary")] +#[macro_export] +#[doc(hidden)] +macro_rules! impl_arbitrary_for_uint { + ($uint: ty, $n_bytes: tt) => { + impl $crate::arbitrary::Arbitrary for $uint { + fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { + let mut res = [0u8; $n_bytes]; + u.fill_buffer(&mut res)?; + Ok(Self::from(res)) + } + } + }; +} + +#[cfg(not(feature = "arbitrary"))] +#[macro_export] +#[doc(hidden)] +macro_rules! 
impl_arbitrary_for_uint { + ($uint: ty, $n_bytes: tt) => {}; +} From 692aa9d9f25e0af9e44cfda15b1653f128168997 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 23 Apr 2020 16:20:57 +0200 Subject: [PATCH 118/359] Fix limit prefix delete case (#368) * Fix limit prefix delete case. Warning this uses iter by prefix for it in rocksdb. * Comment * Changes needed for merging, end_prefix returning option in new code and renaming of iter with prefix. * reduce cornercase iteration windows size. * extract common iter prefix delete corner case code. * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: Andronik Ordian * Remove unused code, there can be call to 'delete_range_cf' on empty range, but that is fine. Co-authored-by: Andronik Ordian --- kvdb-memorydb/src/lib.rs | 7 +++++-- kvdb-rocksdb/src/iter.rs | 4 ++-- kvdb-rocksdb/src/lib.rs | 26 ++++++++++++++------------ kvdb-shared-tests/src/lib.rs | 21 +++++++++++++-------- kvdb/src/lib.rs | 26 +++++++++++++++----------- 5 files changed, 49 insertions(+), 35 deletions(-) diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 6a70babea..0ac955a0b 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -74,8 +74,11 @@ impl KeyValueDB for InMemory { col.clear(); } else { let start_range = Bound::Included(prefix.to_vec()); - let end_range = Bound::Excluded(kvdb::end_prefix(&prefix[..])); - let keys: Vec<_> = col.range((start_range, end_range)).map(|(k, _)| k.clone()).collect(); + let keys: Vec<_> = if let Some(end_range) = kvdb::end_prefix(&prefix[..]) { + col.range((start_range, Bound::Excluded(end_range))).map(|(k, _)| k.clone()).collect() + } else { + col.range((start_range, Bound::Unbounded)).map(|(k, _)| k.clone()).collect() + }; for key in keys.into_iter() { col.remove(&key[..]); } diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index a1ef70a53..62f9ba9a2 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -107,12 +107,12 @@ where read_lock: RwLockReadGuard<'a, 
Option>, col: u32, prefix: &[u8], - upper_bound: Box<[u8]>, + upper_bound: Option>, read_opts: &ReadOptions, ) -> Self { Self { inner: Self::new_inner(read_lock, |db| db.iter_with_prefix(col, prefix, read_opts)), - upper_bound_prefix: Some(upper_bound), + upper_bound_prefix: upper_bound, } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 72c96f540..d4e76b4c8 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -443,15 +443,16 @@ impl Database { stats_total_bytes += key.len(); batch.delete_cf(cf, &key).map_err(other_io_err)? } - DBOp::DeletePrefix { col: _, prefix } => { - if !prefix.is_empty() { - let end_range = kvdb::end_prefix(&prefix[..]); - batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; - } else { - // Deletes all values in the column. - let end_range = &[u8::max_value()]; - batch.delete_range_cf(cf, &[][..], &end_range[..]).map_err(other_io_err)?; - batch.delete_cf(cf, &end_range[..]).map_err(other_io_err)?; + DBOp::DeletePrefix { col, prefix } => { + let end_prefix = kvdb::end_prefix(&prefix[..]); + let no_end = end_prefix.is_none(); + let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); + batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; + if no_end { + let prefix = if prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] 
}; + for (key, _) in self.iter_with_prefix(col, prefix) { + batch.delete_cf(cf, &key[..]).map_err(other_io_err)?; + } } } }; @@ -517,15 +518,16 @@ impl Database { let optional = if read_lock.is_some() { let mut read_opts = ReadOptions::default(); read_opts.set_verify_checksums(false); - let end_prefix = kvdb::end_prefix(prefix).into_boxed_slice(); // rocksdb doesn't work with an empty upper bound - if !end_prefix.is_empty() { + let end_prefix = kvdb::end_prefix(prefix).map(|end_prefix| { + let end_prefix = end_prefix.into_boxed_slice(); // SAFETY: the end_prefix lives as long as the iterator // See `ReadGuardedIterator` definition for more details. unsafe { read_opts.set_iterate_upper_bound(&end_prefix); } - } + end_prefix + }); let guarded = iter::ReadGuardedIterator::new_with_prefix(read_lock, col, prefix, end_prefix, &read_opts); Some(guarded) } else { diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs index 76fb00c5c..f3352faa0 100644 --- a/kvdb-shared-tests/src/lib.rs +++ b/kvdb-shared-tests/src/lib.rs @@ -175,7 +175,7 @@ pub fn test_io_stats(db: &dyn KeyValueDB) -> io::Result<()> { } /// The number of columns required to run `test_delete_prefix`. -pub const DELETE_PREFIX_NUM_COLUMNS: u32 = 5; +pub const DELETE_PREFIX_NUM_COLUMNS: u32 = 7; /// A test for `KeyValueDB::delete_prefix`. 
pub fn test_delete_prefix(db: &dyn KeyValueDB) -> io::Result<()> { @@ -190,6 +190,7 @@ pub fn test_delete_prefix(db: &dyn KeyValueDB) -> io::Result<()> { &[2][..], &[2, 0][..], &[2, 255][..], + &[255; 16][..], ]; let init_db = |ix: u32| -> io::Result<()> { let mut batch = db.transaction(); @@ -199,8 +200,8 @@ pub fn test_delete_prefix(db: &dyn KeyValueDB) -> io::Result<()> { db.write(batch)?; Ok(()) }; - let check_db = |ix: u32, content: [bool; 10]| -> io::Result<()> { - let mut state = [true; 10]; + let check_db = |ix: u32, content: [bool; 11]| -> io::Result<()> { + let mut state = [true; 11]; for (c, key) in keys.iter().enumerate() { state[c] = db.get(ix, key)?.is_some(); } @@ -209,15 +210,19 @@ pub fn test_delete_prefix(db: &dyn KeyValueDB) -> io::Result<()> { }; let tests: [_; DELETE_PREFIX_NUM_COLUMNS as usize] = [ // standard - (&[1u8][..], [true, true, true, false, false, false, false, true, true, true]), + (&[1u8][..], [true, true, true, false, false, false, false, true, true, true, true]), // edge - (&[1u8, 255, 255][..], [true, true, true, true, true, true, false, true, true, true]), + (&[1u8, 255, 255][..], [true, true, true, true, true, true, false, true, true, true, true]), // none 1 - (&[1, 2][..], [true, true, true, true, true, true, true, true, true, true]), + (&[1, 2][..], [true, true, true, true, true, true, true, true, true, true, true]), // none 2 - (&[8][..], [true, true, true, true, true, true, true, true, true, true]), + (&[8][..], [true, true, true, true, true, true, true, true, true, true, true]), + // last value + (&[255, 255][..], [true, true, true, true, true, true, true, true, true, true, false]), + // last value, limit prefix + (&[255][..], [true, true, true, true, true, true, true, true, true, true, false]), // all - (&[][..], [false, false, false, false, false, false, false, false, false, false]), + (&[][..], [false, false, false, false, false, false, false, false, false, false, false]), ]; for (ix, test) in tests.iter().enumerate() 
{ let ix = ix as u32; diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index a9c74e949..7cacff666 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -143,15 +143,19 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { /// For a given start prefix (inclusive), returns the correct end prefix (non-inclusive). /// This assumes the key bytes are ordered in lexicographical order. -pub fn end_prefix(prefix: &[u8]) -> Vec { +/// Since key length is not limited, for some case we return `None` because there is +/// no bounded limit (every keys in the serie `[]`, `[255]`, `[255, 255]` ...). +pub fn end_prefix(prefix: &[u8]) -> Option> { let mut end_range = prefix.to_vec(); while let Some(0xff) = end_range.last() { end_range.pop(); } if let Some(byte) = end_range.last_mut() { *byte += 1; + Some(end_range) + } else { + None } - end_range } #[cfg(test)] @@ -160,18 +164,18 @@ mod test { #[test] fn end_prefix_test() { - assert_eq!(end_prefix(&[5, 6, 7]), vec![5, 6, 8]); - assert_eq!(end_prefix(&[5, 6, 255]), vec![5, 7]); + assert_eq!(end_prefix(&[5, 6, 7]), Some(vec![5, 6, 8])); + assert_eq!(end_prefix(&[5, 6, 255]), Some(vec![5, 7])); // This is not equal as the result is before start. - assert_ne!(end_prefix(&[5, 255, 255]), vec![5, 255]); + assert_ne!(end_prefix(&[5, 255, 255]), Some(vec![5, 255])); // This is equal ([5, 255] will not be deleted because // it is before start). 
- assert_eq!(end_prefix(&[5, 255, 255]), vec![6]); - assert_eq!(end_prefix(&[255, 255, 255]), vec![]); + assert_eq!(end_prefix(&[5, 255, 255]), Some(vec![6])); + assert_eq!(end_prefix(&[255, 255, 255]), None); - assert_eq!(end_prefix(&[0x00, 0xff]), vec![0x01]); - assert_eq!(end_prefix(&[0xff]), vec![]); - assert_eq!(end_prefix(b"0"), b"1".to_vec()); - assert_eq!(end_prefix(&[]), vec![]); + assert_eq!(end_prefix(&[0x00, 0xff]), Some(vec![0x01])); + assert_eq!(end_prefix(&[0xff]), None); + assert_eq!(end_prefix(&[]), None); + assert_eq!(end_prefix(b"0"), Some(b"1".to_vec())); } } From 9d1c613863c22c285a25fdb8ed112a128976ef9d Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 27 Apr 2020 12:18:49 +0200 Subject: [PATCH 119/359] uint: fix UB in uint::from_big_endian (#381) * uint: fix UB in uint::from_fig_endian * uint: add regression tests --- uint/src/uint.rs | 9 ++------- uint/tests/uint_tests.rs | 6 ++++++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 26b633416..2811d7d6c 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1120,13 +1120,8 @@ macro_rules! 
construct_uint { let mut ret = [0; $n_words]; unsafe { let ret_u8: &mut [u8; $n_words * 8] = $crate::core_::mem::transmute(&mut ret); - let mut ret_ptr = ret_u8.as_mut_ptr(); - let mut slice_ptr = slice.as_ptr().offset(slice.len() as isize - 1); - for _ in 0..slice.len() { - *ret_ptr = *slice_ptr; - ret_ptr = ret_ptr.offset(1); - slice_ptr = slice_ptr.offset(-1); - } + ret_u8[0..slice.len()].copy_from_slice(slice); + ret_u8[0..slice.len()].reverse(); } $name(ret) diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 2e226ad36..32a14c728 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -990,6 +990,12 @@ fn from_big_endian() { let number = U256::from_big_endian(&source[..]); assert_eq!(U256::from(1), number); + + let number = U256::from_big_endian(&[]); + assert_eq!(U256::zero(), number); + + let number = U256::from_big_endian(&[1]); + assert_eq!(U256::from(1), number); } #[test] From 89e465eaedad9d3e6ba64732ddcbb71edf504ac6 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 27 Apr 2020 18:01:37 +0200 Subject: [PATCH 120/359] prepare releases for a few crates (#382) * update changelogs * update versions * update dev-dependency in impl/serde --- ethbloom/CHANGELOG.md | 3 +++ ethbloom/Cargo.toml | 2 +- ethereum-types/CHANGELOG.md | 3 +++ ethereum-types/Cargo.toml | 2 +- fixed-hash/CHANGELOG.md | 3 +++ fixed-hash/Cargo.toml | 2 +- primitive-types/CHANGELOG.md | 3 +++ primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/serde/Cargo.toml | 2 +- uint/CHANGELOG.md | 4 ++++ uint/Cargo.toml | 2 +- 11 files changed, 23 insertions(+), 7 deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index de418cd9d..cc467dae8 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.1] - 2020-04-27 +- Added `arbitrary` feature. 
[#378](https://github.com/paritytech/parity-common/pull/378) + ## [0.9.0] - 2020-03-16 - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 1f2de0a62..de61568ce 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.9.0" +version = "0.9.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 9455e7bba..c91696835 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + ## [0.9.0] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 8d9dc0739..572ad5530 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.9.0" +version = "0.9.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index da5ec524d..6db7b6e76 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.6.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + ## [0.6.0] - 2020-03-16 - Removed `libc` feature. 
[#317](https://github.com/paritytech/parity-common/pull/317) - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 33fd076a0..c8e85585f 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.6.0" +version = "0.6.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 4c6f65713..8f5d60af2 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.1] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) + ## [0.7.0] - 2020-03-16 - Removed `libc` feature. [#317](https://github.com/paritytech/parity-common/pull/317) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index fcbdc4e63..b64d2b35b 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.7.0" +version = "0.7.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,7 +9,7 @@ edition = "2018" [dependencies] fixed-hash = { version = "0.6", path = "../fixed-hash", default-features = false } -uint = { version = "0.8.1", path = "../uint", default-features = false } +uint = { version = "0.8.3", path = "../uint", default-features = false } impl-serde = { version = "0.3.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } diff --git 
a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index ab923bb63..78122b1ff 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -14,7 +14,7 @@ serde = "1.0.101" criterion = "0.3.0" serde_derive = "1.0.101" serde_json = "1.0.41" -uint = "0.8.1" +uint = { version = "0.8.3", path = "../../../uint" } [[bench]] name = "impl_serde" diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index c04300e84..386475612 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.8.3] - 2020-04-27 +- Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) +- Fixed UB in `from_big_endian`. [#381](https://github.com/paritytech/parity-common/pull/381) + ## [0.8.2] - 2019-10-24 ### Fixed - Fixed 2018 edition imports. [#237](https://github.com/paritytech/parity-common/pull/237) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 97d489884..274b50692 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.8.2" +version = "0.8.3" authors = ["Parity Technologies "] readme = "README.md" edition = "2018" From 50c3dc2ff733723371678da88d920491879c31cd Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 29 Apr 2020 19:30:27 +0200 Subject: [PATCH 121/359] kvdb-rocksdb: update rocksdb to 0.14 (#379) * kvdb-rocksdb: update to new set_upper_bound API * kvdb-rocksdb: update rocksdb to crates.io version * kvdb-rocksdb: update the changelog * Fix build? Set VM template. * Fix build? correct image name * Fix build? Maybe it's 2019? * appveyor: try release build * Revert "appveyor: try release build" This reverts commit ace87ee0c81594ff87fb956c79da36a3f382062d. 
* checkout https://github.com/rust-rocksdb/rust-rocksdb/pull/412 * revert patch * revert unrelated changes Co-authored-by: David Palm --- kvdb-rocksdb/CHANGELOG.md | 1 + kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/src/iter.rs | 43 +++++++++++++-------------------------- kvdb-rocksdb/src/lib.rs | 37 ++++++++++++++++----------------- 4 files changed, 34 insertions(+), 49 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index cd4838404..c4a46858f 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Updated RocksDB to 6.7.3. [#379](https://github.com/paritytech/parity-common/pull/379) ### Breaking - Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) - Rename and optimize prefix iteration. [#365](https://github.com/paritytech/parity-common/pull/365) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 77812b6fe..ee5b2d394 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -19,7 +19,7 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.10.0" regex = "1.3.1" -rocksdb = { version = "0.13", features = ["snappy"], default-features = false } +rocksdb = { version = "0.14", features = ["snappy"], default-features = false } owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false, features = ["std", "smallvec"] } diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 62f9ba9a2..ba8c20e34 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -27,13 +27,6 @@ pub type KeyValuePair = (Box<[u8]>, Box<[u8]>); /// Iterator with built-in synchronization. pub struct ReadGuardedIterator<'a, I, T> { inner: OwningHandle>, DerefWrapper>>, - // We store the upper bound here - // to make sure it lives at least as long as the iterator. 
- // See https://github.com/rust-rocksdb/rust-rocksdb/pull/309. - // TODO: remove this once https://github.com/rust-rocksdb/rust-rocksdb/pull/377 - // is merged and released. - #[allow(dead_code)] - upper_bound_prefix: Option>, } // We can't implement `StableAddress` for a `RwLockReadGuard` @@ -80,15 +73,15 @@ impl<'a, I: Iterator, T> Iterator for ReadGuardedIterator<'a, I, T> { pub trait IterationHandler { type Iterator: Iterator; - /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes a - /// reference to a `ReadOptions` to allow configuration of the new iterator (see + /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes + /// `ReadOptions` to allow configuration of the new iterator (see /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). - fn iter(&self, col: u32, read_opts: &ReadOptions) -> Self::Iterator; - /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes a - /// reference to a `ReadOptions` to allow configuration of the new iterator (see + fn iter(&self, col: u32, read_opts: ReadOptions) -> Self::Iterator; + /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes + /// `ReadOptions` to allow configuration of the new iterator (see /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). /// The `Iterator` iterates over keys which start with the provided `prefix`. - fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator; + fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator; } impl<'a, T> ReadGuardedIterator<'a, <&'a T as IterationHandler>::Iterator, T> @@ -97,8 +90,8 @@ where { /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, /// where `DBIterator` iterates over all keys. 
- pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32, read_opts: &ReadOptions) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter(col, read_opts)), upper_bound_prefix: None } + pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32, read_opts: ReadOptions) -> Self { + Self { inner: Self::new_inner(read_lock, |db| db.iter(col, read_opts)) } } /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, @@ -107,13 +100,9 @@ where read_lock: RwLockReadGuard<'a, Option>, col: u32, prefix: &[u8], - upper_bound: Option>, - read_opts: &ReadOptions, + read_opts: ReadOptions, ) -> Self { - Self { - inner: Self::new_inner(read_lock, |db| db.iter_with_prefix(col, prefix, read_opts)), - upper_bound_prefix: upper_bound, - } + Self { inner: Self::new_inner(read_lock, |db| db.iter_with_prefix(col, prefix, read_opts)) } } fn new_inner( @@ -130,15 +119,11 @@ where impl<'a> IterationHandler for &'a DBAndColumns { type Iterator = DBIterator<'a>; - fn iter(&self, col: u32, read_opts: &ReadOptions) -> Self::Iterator { - self.db - .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::Start) - .expect("iterator params are valid; qed") + fn iter(&self, col: u32, read_opts: ReadOptions) -> Self::Iterator { + self.db.iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::Start) } - fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: &ReadOptions) -> Self::Iterator { - self.db - .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) - .expect("iterator params are valid; qed") + fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator { + self.db.iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index d4e76b4c8..4d40b9b2a 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -313,6 +313,12 @@ fn 
generate_options(config: &DatabaseConfig) -> Options { opts } +fn generate_read_options() -> ReadOptions { + let mut read_opts = ReadOptions::default(); + read_opts.set_verify_checksums(false); + read_opts +} + /// Generate the block based options for RocksDB, based on the given `DatabaseConfig`. fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { let mut block_opts = BlockBasedOptions::default(); @@ -360,8 +366,7 @@ impl Database { let column_names: Vec<_> = (0..config.columns).map(|c| format!("col{}", c)).collect(); let write_opts = WriteOptions::default(); - let mut read_opts = ReadOptions::default(); - read_opts.set_verify_checksums(false); + let read_opts = generate_read_options(); let cf_descriptors: Vec<_> = (0..config.columns) .map(|i| ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i))) @@ -436,22 +441,22 @@ impl Database { match op { DBOp::Insert { col: _, key, value } => { stats_total_bytes += key.len() + value.len(); - batch.put_cf(cf, &key, &value).map_err(other_io_err)? + batch.put_cf(cf, &key, &value); } DBOp::Delete { col: _, key } => { // We count deletes as writes. stats_total_bytes += key.len(); - batch.delete_cf(cf, &key).map_err(other_io_err)? + batch.delete_cf(cf, &key); } DBOp::DeletePrefix { col, prefix } => { let end_prefix = kvdb::end_prefix(&prefix[..]); let no_end = end_prefix.is_none(); let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); - batch.delete_range_cf(cf, &prefix[..], &end_range[..]).map_err(other_io_err)?; + batch.delete_range_cf(cf, &prefix[..], &end_range[..]); if no_end { let prefix = if prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] 
}; for (key, _) in self.iter_with_prefix(col, prefix) { - batch.delete_cf(cf, &key[..]).map_err(other_io_err)?; + batch.delete_cf(cf, &key[..]); } } } @@ -502,7 +507,8 @@ impl Database { pub fn iter<'a>(&'a self, col: u32) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { - let guarded = iter::ReadGuardedIterator::new(read_lock, col, &self.read_opts); + let read_opts = generate_read_options(); + let guarded = iter::ReadGuardedIterator::new(read_lock, col, read_opts); Some(guarded) } else { None @@ -516,19 +522,12 @@ impl Database { fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { let read_lock = self.db.read(); let optional = if read_lock.is_some() { - let mut read_opts = ReadOptions::default(); - read_opts.set_verify_checksums(false); + let mut read_opts = generate_read_options(); // rocksdb doesn't work with an empty upper bound - let end_prefix = kvdb::end_prefix(prefix).map(|end_prefix| { - let end_prefix = end_prefix.into_boxed_slice(); - // SAFETY: the end_prefix lives as long as the iterator - // See `ReadGuardedIterator` definition for more details. - unsafe { - read_opts.set_iterate_upper_bound(&end_prefix); - } - end_prefix - }); - let guarded = iter::ReadGuardedIterator::new_with_prefix(read_lock, col, prefix, end_prefix, &read_opts); + if let Some(end_prefix) = kvdb::end_prefix(prefix) { + read_opts.set_iterate_upper_bound(end_prefix); + } + let guarded = iter::ReadGuardedIterator::new_with_prefix(read_lock, col, prefix, read_opts); Some(guarded) } else { None From 990d45dd2b5ae40f56640e5fa8c9e012583ea5bd Mon Sep 17 00:00:00 2001 From: Andrew Plaza Date: Fri, 1 May 2020 16:44:39 +0200 Subject: [PATCH 122/359] Add Rocksdb Secondary Instance Api (#384) * kvdb-rocksdb: update to new set_upper_bound API * kvdb-rocksdb: update rocksdb to crates.io version * kvdb-rocksdb: update the changelog * Fix build? Set VM template. * Fix build? correct image name * Fix build? 
Maybe it's 2019? * appveyor: try release build * Revert "appveyor: try release build" This reverts commit ace87ee0c81594ff87fb956c79da36a3f382062d. * checkout https://github.com/rust-rocksdb/rust-rocksdb/pull/412 * revert patch * revert unrelated changes * add open as secondary rocksdb api * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: Andronik Ordian * add more information to secondary mode comment * add function to catch up a secondary instance with a primary instance * one more doc comment for more clarity * style fixes * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: David * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: David * change name of `secondary_mode` option to `secondary` * Update kvdb-rocksdb/src/lib.rs Co-Authored-By: David * fix some punctuation * specify a different directory for secondary instance to store its logs * Update kvdb-rocksdb/src/lib.rs Co-authored-by: Andronik Ordian * remove catching up on primary db in test * doc comment fixes expand on what `try_catch_up_with_secondary` does, since it may have some implications on the primary instance of rocksdb according to L503-566 in `db/db_impl/db_impl_secondary.cc` of facebook/rocksdb * remove wrong info about blocking primary instance * more docs for catch-up-with-primary * grammar * make `max_open_files` comment clearer Co-authored-by: Andronik Ordian Co-authored-by: David Palm --- kvdb-rocksdb/src/lib.rs | 153 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 138 insertions(+), 15 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 4d40b9b2a..2b1fd362a 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -166,6 +166,17 @@ pub struct DatabaseConfig { /// It can have a negative performance impact up to 10% according to /// https://github.com/facebook/rocksdb/wiki/Statistics. pub enable_statistics: bool, + /// Open the database as a secondary instance. + /// Specify a path for the secondary instance of the database. 
+ /// Secondary instances are read-only and kept updated by tailing the rocksdb MANIFEST. + /// It is up to the user to call `try_catch_up_with_primary()` manually to update the secondary db. + /// Disabled by default. + /// + /// `max_open_files` is overridden to always equal `-1`. + /// May have a negative performance impact on the secondary instance + /// if the secondary instance reads and applies state changes before the primary instance compacts them. + /// More info: https://github.com/facebook/rocksdb/wiki/Secondary-instance + pub secondary: Option, } impl DatabaseConfig { @@ -215,6 +226,7 @@ impl Default for DatabaseConfig { columns: 1, keep_log_file_num: 1, enable_statistics: false, + secondary: None, } } } @@ -305,7 +317,11 @@ fn generate_options(config: &DatabaseConfig) -> Options { } opts.set_use_fsync(false); opts.create_if_missing(true); - opts.set_max_open_files(config.max_open_files); + if config.secondary.is_some() { + opts.set_max_open_files(-1) + } else { + opts.set_max_open_files(config.max_open_files); + } opts.set_bytes_per_sync(1 * MB as u64); opts.set_keep_log_file_num(1); opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); @@ -364,12 +380,38 @@ impl Database { } let column_names: Vec<_> = (0..config.columns).map(|c| format!("col{}", c)).collect(); - let write_opts = WriteOptions::default(); let read_opts = generate_read_options(); + let db = if let Some(secondary_path) = &config.secondary { + Self::open_secondary(&opts, path, secondary_path.as_str(), column_names.as_slice())? + } else { + let column_names: Vec<&str> = column_names.iter().map(|s| s.as_str()).collect(); + Self::open_primary(&opts, path, config, column_names.as_slice(), &block_opts)?
+ }; + + Ok(Database { + db: RwLock::new(Some(DBAndColumns { db, column_names })), + config: config.clone(), + path: path.to_owned(), + opts, + read_opts, + write_opts, + block_opts, + stats: stats::RunningDbStats::new(), + }) + } + + /// Internal API to open a database in primary mode. + fn open_primary( + opts: &Options, + path: &str, + config: &DatabaseConfig, + column_names: &[&str], + block_opts: &BlockBasedOptions, + ) -> io::Result { let cf_descriptors: Vec<_> = (0..config.columns) - .map(|i| ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i))) + .map(|i| ColumnFamilyDescriptor::new(column_names[i as usize], config.column_config(&block_opts, i))) .collect(); let db = match DB::open_cf_descriptors(&opts, path, cf_descriptors) { @@ -390,7 +432,7 @@ impl Database { ok => ok, }; - let db = match db { + Ok(match db { Ok(db) => db, Err(ref s) if is_corrupted(s) => { warn!("DB corrupted: {}, attempting repair", s); @@ -398,23 +440,34 @@ impl Database { let cf_descriptors: Vec<_> = (0..config.columns) .map(|i| { - ColumnFamilyDescriptor::new(&column_names[i as usize], config.column_config(&block_opts, i)) + ColumnFamilyDescriptor::new(column_names[i as usize], config.column_config(&block_opts, i)) }) .collect(); DB::open_cf_descriptors(&opts, path, cf_descriptors).map_err(other_io_err)? } Err(s) => return Err(other_io_err(s)), - }; - Ok(Database { - db: RwLock::new(Some(DBAndColumns { db, column_names })), - config: config.clone(), - path: path.to_owned(), - opts, - read_opts, - write_opts, - block_opts, - stats: stats::RunningDbStats::new(), + }) + } + + /// Internal API to open a database in secondary mode. + /// Secondary database needs a separate path to store its own logs.
+ fn open_secondary( + opts: &Options, + path: &str, + secondary_path: &str, + column_names: &[String], + ) -> io::Result { + let db = DB::open_cf_as_secondary(&opts, path, secondary_path, column_names); + + Ok(match db { + Ok(db) => db, + Err(ref s) if is_corrupted(s) => { + warn!("DB corrupted: {}, attempting repair", s); + DB::repair(&opts, path).map_err(other_io_err)?; + DB::open_cf_as_secondary(&opts, path, secondary_path, column_names).map_err(other_io_err)? + } + Err(s) => return Err(other_io_err(s)), }) } @@ -635,6 +688,33 @@ impl Database { HashMap::new() } } + + /// Try to catch up a secondary instance with + /// the primary by reading as much from the logs as possible. + /// + /// Guaranteed to have changes up to the time that `try_catch_up_with_primary` is called + /// if it finishes successfully. + /// + /// Blocks until the MANIFEST file and any state changes in the corresponding Write-Ahead-Logs + /// are applied to the secondary instance. If the manifest files are very large + /// this method could take a long time. + /// + /// If Write-Ahead-Logs have been purged by the primary instance before the secondary + /// is able to open them, the secondary will not be caught up + /// until this function is called again and new Write-Ahead-Logs are identified. + /// + /// If called while the primary is writing, the catch-up may fail. + /// + /// If the secondary is unable to catch up because of missing logs, + /// this method fails silently and no error is returned. + /// + /// Calling this as primary will return an error. + pub fn try_catch_up_with_primary(&self) -> io::Result<()> { + match self.db.read().as_ref() { + Some(DBAndColumns { db, ..
}) => db.try_catch_up_with_primary().map_err(other_io_err), + None => Ok(()), + } + } } // duplicate declaration of methods here to avoid trait import in certain existing cases @@ -755,6 +835,48 @@ mod tests { st::test_io_stats(&db) } + #[test] + fn secondary_db_get() -> io::Result<()> { + let primary = TempDir::new("")?; + let config = DatabaseConfig::with_columns(1); + let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + + let key1 = b"key1"; + let mut transaction = db.transaction(); + transaction.put(0, key1, b"horse"); + db.write(transaction)?; + + let config = DatabaseConfig { + secondary: TempDir::new("")?.path().to_str().map(|s| s.to_string()), + ..DatabaseConfig::with_columns(1) + }; + let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + assert_eq!(&*second_db.get(0, key1)?.unwrap(), b"horse"); + Ok(()) + } + + #[test] + fn secondary_db_catch_up() -> io::Result<()> { + let primary = TempDir::new("")?; + let config = DatabaseConfig::with_columns(1); + let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + + let config = DatabaseConfig { + secondary: TempDir::new("")?.path().to_str().map(|s| s.to_string()), + ..DatabaseConfig::with_columns(1) + }; + let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + + let mut transaction = db.transaction(); + transaction.put(0, b"key1", b"mule"); + transaction.put(0, b"key2", b"cat"); + db.write(transaction)?; + + second_db.try_catch_up_with_primary()?; + assert_eq!(&*second_db.get(0, b"key2")?.unwrap(), b"cat"); + Ok(()) + } + #[test] fn mem_tables_size() { let tempdir = TempDir::new("").unwrap(); @@ -766,6 +888,7 @@ mod tests { columns: 11, keep_log_file_num: 1, enable_statistics: false, + secondary: None, }; let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); From 
371f17fe26cad6dc8f9cce04a779d5a4a21c8319 Mon Sep 17 00:00:00 2001 From: Cheng XU <3105373+xu-cheng@users.noreply.github.com> Date: Sun, 3 May 2020 22:57:45 -0700 Subject: [PATCH 123/359] primitive-types: add no_std support for serde feature (#385) * primitive-types: add no_std support for serde feature This adds no_std support to primitive-types with serde. Due to https://github.com/rust-lang/cargo/issues/3494, a separate new feature `serde_no_std` is created. * primitive-types: update changelog * travis: add tests for primitive-types --- .travis.yml | 2 ++ primitive-types/CHANGELOG.md | 1 + primitive-types/Cargo.toml | 3 ++- primitive-types/impls/serde/Cargo.toml | 6 +++++- primitive-types/impls/serde/src/lib.rs | 8 ++++++++ primitive-types/impls/serde/src/serialize.rs | 8 ++++++-- 6 files changed, 24 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 53e552f6d..a79ed71d8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -55,6 +55,8 @@ script: - cd parity-util-mem/ && cargo test --features=jemalloc-global && cd .. - cd parity-util-mem/ && cargo test --features=mimalloc-global && cd .. - cd parity-util-mem/ && cargo test --no-default-features --features=dlmalloc-global && cd .. + - cd primitive-types/ && cargo test --all-features && cd .. + - cd primitive-types/ && cargo test --no-default-features --features=serde_no_std && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. - cd kvdb-web/ && wasm-pack test --headless --firefox && cd .. diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 8f5d60af2..018d16eda 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Added `no_std` support for `serde` feature. 
[#385](https://github.com/paritytech/parity-common/pull/385) ## [0.7.1] - 2020-04-27 - Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index b64d2b35b..67622a480 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -19,7 +19,8 @@ default = ["std"] std = ["uint/std", "fixed-hash/std", "impl-codec/std"] byteorder = ["fixed-hash/byteorder"] rustc-hex = ["fixed-hash/rustc-hex"] -serde = ["std", "impl-serde"] +serde = ["std", "impl-serde", "impl-serde/std"] +serde_no_std = ["impl-serde"] codec = ["impl-codec"] rlp = ["impl-rlp"] arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 78122b1ff..b89051f75 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -7,8 +7,12 @@ license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." +[features] +default = ["std"] +std = ["serde/std"] + [dependencies] -serde = "1.0.101" +serde = { version = "1.0.101", default-features = false, features = ["alloc"] } [dev-dependencies] criterion = "0.3.0" diff --git a/primitive-types/impls/serde/src/lib.rs b/primitive-types/impls/serde/src/lib.rs index 500a60cc4..63fe535cb 100644 --- a/primitive-types/impls/serde/src/lib.rs +++ b/primitive-types/impls/serde/src/lib.rs @@ -8,6 +8,14 @@ //! Serde serialization support for uint and fixed hash. 
+#![no_std] + +#[macro_use] +extern crate alloc; + +#[cfg(feature = "std")] +extern crate std; + #[doc(hidden)] pub use serde; diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 542ac0dc8..90e42e2a6 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -6,8 +6,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use alloc::string::String; +use alloc::vec::Vec; +use core::fmt; +use core::result::Result; use serde::{de, Deserializer, Serializer}; -use std::fmt; static CHARS: &[u8] = b"0123456789abcdef"; @@ -58,7 +61,7 @@ fn to_hex_raw<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a } // SAFETY: all characters come either from CHARS or "0x", therefore valid UTF8 - unsafe { std::str::from_utf8_unchecked(&v[0..idx]) } + unsafe { core::str::from_utf8_unchecked(&v[0..idx]) } } /// Decoding bytes from hex string error. @@ -75,6 +78,7 @@ pub enum FromHexError { }, } +#[cfg(feature = "std")] impl std::error::Error for FromHexError {} impl fmt::Display for FromHexError { From 844f899ed48b2382af483515175af42150f967a6 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 5 May 2020 14:21:38 +0200 Subject: [PATCH 124/359] prepare new `kvdb-` release (#386) * update changelogs and bump versions * update kvdb-web changelog * kvdb-rocksdb: mark second instance as breaking It added a public field to a public struct `DatabaseConfig`. * maybe today is the day? 
--- kvdb-memorydb/CHANGELOG.md | 2 ++ kvdb-memorydb/Cargo.toml | 6 +++--- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 6 +++--- kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb-web/CHANGELOG.md | 2 ++ kvdb-web/Cargo.toml | 8 ++++---- kvdb/CHANGELOG.md | 6 ++++-- kvdb/Cargo.toml | 2 +- 9 files changed, 24 insertions(+), 15 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 4db19fc7a..94f297a2d 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2020-05-05 ### Breaking - Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 290727c75..7104c0c25 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -10,7 +10,7 @@ edition = "2018" [dependencies] parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false, features = ["std"] } parking_lot = "0.10.0" -kvdb = { version = "0.5", path = "../kvdb" } +kvdb = { version = "0.6", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.3" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.4" } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index c4a46858f..2c1cd2813 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,10 +5,13 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.0] - 2020-05-05 - Updated RocksDB to 6.7.3. [#379](https://github.com/paritytech/parity-common/pull/379) ### Breaking - Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) - Rename and optimize prefix iteration. [#365](https://github.com/paritytech/parity-common/pull/365) +- Added Secondary Instance API. [#384](https://github.com/paritytech/parity-common/pull/384) ## [0.7.0] - 2020-03-16 - Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index ee5b2d394..235d3bd1d 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,7 +14,7 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.4" -kvdb = { path = "../kvdb", version = "0.5" } +kvdb = { path = "../kvdb", version = "0.6" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.10.0" @@ -27,7 +27,7 @@ parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-featur alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.3" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.4" } rand = "0.7.2" tempdir = "0.3.7" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index e6f0876c7..e2eb7647e 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "kvdb-shared-tests" -version = "0.3.0" +version = "0.4.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb 
functionality, to be executed against actual implementations" license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.5" } +kvdb = { path = "../kvdb", version = "0.6" } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 97fa61f4f..128934771 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2020-05-05 ### Breaking - Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 2a9a681d6..e6e7292ee 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,8 +11,8 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.54" js-sys = "0.3.31" -kvdb = { version = "0.5", path = "../kvdb" } -kvdb-memorydb = { version = "0.5", path = "../kvdb-memorydb" } +kvdb = { version = "0.6", path = "../kvdb" } +kvdb-memorydb = { version = "0.6", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" @@ -40,6 +40,6 @@ features = [ [dev-dependencies] console_log = "0.1.2" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.3" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.4" } wasm-bindgen-test = "0.3.4" wasm-bindgen-futures = "0.4.4" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 62771e4dc..0ce19f17f 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,10 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2020-05-05 ### Breaking - Removed `write_buffered` and `flush` methods. 
[#313](https://github.com/paritytech/parity-common/pull/313) -- Introduce a new `DeletePrefix` database operation. [#360](https://github.com/paritytech/parity-common/pull/360) -- Rename prefix iteration to `iter_with_prefix`. [#365](https://github.com/paritytech/parity-common/pull/365) +- Introduced a new `DeletePrefix` database operation. [#360](https://github.com/paritytech/parity-common/pull/360) +- Renamed prefix iteration to `iter_with_prefix`. [#365](https://github.com/paritytech/parity-common/pull/365) ## [0.5.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index f6851ef33..7638955ef 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" From b3cc129917c54615decd28db4cd84b2616fb532b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 5 May 2020 17:01:18 +0200 Subject: [PATCH 125/359] update changelogs and bump versions (#387) --- primitive-types/CHANGELOG.md | 4 +++- primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/serde/CHANGELOG.md | 3 +++ primitive-types/impls/serde/Cargo.toml | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 018d16eda..4d317d442 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,7 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- Added `no_std` support for `serde` feature. [#385](https://github.com/paritytech/parity-common/pull/385) + +## [0.7.2] - 2020-05-05 +- Added `serde_no_std` feature. [#385](https://github.com/paritytech/parity-common/pull/385) ## [0.7.1] - 2020-04-27 - Added `arbitrary` feature. 
[#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 67622a480..d48bde3d2 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.7.1" +version = "0.7.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ edition = "2018" [dependencies] fixed-hash = { version = "0.6", path = "../fixed-hash", default-features = false } uint = { version = "0.8.3", path = "../uint", default-features = false } -impl-serde = { version = "0.3.0", path = "impls/serde", default-features = false, optional = true } +impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index e58aeb12c..2c6acdd96 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.3.1] - 2020-05-05 +- Added `no_std` support. [#385](https://github.com/paritytech/parity-common/pull/385) + ## [0.2.3] - 2019-10-29 ### Fixed - Fixed a bug in empty slice serialization. 
[#253](https://github.com/paritytech/parity-common/pull/253) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index b89051f75..a76c0e4d9 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.3.0" +version = "0.3.1" authors = ["Parity Technologies "] edition = "2018" license = "MIT OR Apache-2.0" From a28452bc276421c9ba5af0c6c0795516b2cc6b3e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 18 May 2020 00:06:05 +0200 Subject: [PATCH 126/359] Implement codec for ethereum types (#393) * Implement codec for ethereum types * Bump patch version * Update changelog for ethbloom and ethereum-types * Fix import order * Add CI test of --all-features for ethbloom and ethereum-types * Fix CI --- .travis.yml | 3 ++- ethbloom/CHANGELOG.md | 3 +++ ethbloom/Cargo.toml | 4 +++- ethbloom/src/lib.rs | 9 ++++++--- ethereum-types/CHANGELOG.md | 3 +++ ethereum-types/Cargo.toml | 4 +++- ethereum-types/src/hash.rs | 12 ++++++++++++ ethereum-types/src/uint.rs | 4 ++++ 8 files changed, 36 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index a79ed71d8..ef5f3a01a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -60,4 +60,5 @@ script: - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. - cd kvdb-web/ && wasm-pack test --headless --firefox && cd .. - + - cd ethbloom/ && cargo test --all-features && cd .. + - cd ethereum-types/ && cargo test --all-features && cd .. diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index cc467dae8..67480095f 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.2] - 2020-05-18 +- Added `codec` feature. [#393](https://github.com/paritytech/parity-common/pull/393) + ## [0.9.1] - 2020-04-27 - Added `arbitrary` feature. 
[#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index de61568ce..b89f51335 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.9.1" +version = "0.9.2" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -15,6 +15,7 @@ crunchy = { version = "0.2.2", default-features = false, features = ["limit_256" fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } +impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] criterion = "0.3.0" @@ -27,6 +28,7 @@ std = ["fixed-hash/std", "crunchy/std"] serialize = ["std", "impl-serde"] rustc-hex = ["fixed-hash/rustc-hex"] arbitrary = ["fixed-hash/arbitrary"] +codec = ["impl-codec"] [[bench]] name = "bloom" diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index c12f5cab8..7b18ec9ee 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -54,6 +54,8 @@ use core::{mem, ops}; use crunchy::unroll; use fixed_hash::*; +#[cfg(feature = "codec")] +use impl_codec::impl_fixed_hash_codec; use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; @@ -68,6 +70,10 @@ construct_fixed_hash! { pub struct Bloom(BLOOM_SIZE); } impl_fixed_hash_rlp!(Bloom, BLOOM_SIZE); +#[cfg(feature = "serialize")] +impl_fixed_hash_serde!(Bloom, BLOOM_SIZE); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(Bloom, BLOOM_SIZE); /// Returns log2. 
fn log2(x: usize) -> u32 { @@ -264,9 +270,6 @@ impl<'a> From<&'a Bloom> for BloomRef<'a> { } } -#[cfg(feature = "serialize")] -impl_fixed_hash_serde!(Bloom, BLOOM_SIZE); - #[cfg(test)] mod tests { use super::{Bloom, Input}; diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index c91696835..058a2d34c 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.2] - 2020-05-18 +- Added `codec` feature. [#393](https://github.com/paritytech/parity-common/pull/393) + ## [0.9.1] - 2020-04-27 - Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 572ad5530..496a5f88a 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.9.1" +version = "0.9.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -14,6 +14,7 @@ uint-crate = { path = "../uint", package = "uint", version = "0.8", default-feat primitive-types = { path = "../primitive-types", version = "0.7", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } +impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] serde_json = "1.0.41" @@ -23,3 +24,4 @@ default = ["std", "serialize"] std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] +codec = 
["impl-codec", "ethbloom/codec"] diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 9e2057dec..25f00557d 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -8,6 +8,8 @@ use crate::{U128, U256, U512, U64}; use fixed_hash::*; +#[cfg(feature = "codec")] +use impl_codec::impl_fixed_hash_codec; use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; @@ -23,16 +25,22 @@ construct_fixed_hash! { pub struct H32(4); } impl_fixed_hash_rlp!(H32, 4); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H32, 4); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H32, 4); construct_fixed_hash! { pub struct H64(8); } impl_fixed_hash_rlp!(H64, 8); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H64, 8); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H64, 8); construct_fixed_hash! { pub struct H128(16); } impl_fixed_hash_rlp!(H128, 16); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H128, 16); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H128, 16); pub use primitive_types::H160; pub use primitive_types::H256; @@ -41,6 +49,8 @@ construct_fixed_hash! { pub struct H264(33); } impl_fixed_hash_rlp!(H264, 33); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H264, 33); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H264, 33); pub use primitive_types::H512; @@ -48,6 +58,8 @@ construct_fixed_hash! { pub struct H520(65); } impl_fixed_hash_rlp!(H520, 65); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H520, 65); +#[cfg(feature = "codec")] +impl_fixed_hash_codec!(H520, 65); macro_rules! impl_uint_conversions { ($hash: ident, $uint: ident) => { diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index 87d09541c..881e0ecd6 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -6,6 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#[cfg(feature = "codec")] +use impl_codec::impl_uint_codec; use impl_rlp::impl_uint_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_uint_serde; @@ -20,6 +22,8 @@ construct_uint! { impl_uint_rlp!(U64, 1); #[cfg(feature = "serialize")] impl_uint_serde!(U64, 1); +#[cfg(feature = "codec")] +impl_uint_codec!(U64, 1); pub use primitive_types::{U128, U256, U512}; From a4a9745ba38f3671948a021d40106621617092a1 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 18 May 2020 13:04:58 +0200 Subject: [PATCH 127/359] uint: simplify `Ord` impl (#389) * uint: simplify Ord impl * uint: fix u512_ord bench to not optimize it away * uint: fix u256_ord bench as well :/ --- uint/benches/bigint.rs | 4 ++-- uint/src/uint.rs | 10 +--------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index ec36c576a..f6a8b1318 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -577,7 +577,7 @@ fn u512_shr(c: &mut Criterion) { fn u256_ord(c: &mut Criterion) { let one = U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]); let two = U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]); - c.bench_function("u256_ord", move |b| b.iter(|| black_box(one < two))); + c.bench_function("u256_ord", move |b| b.iter(|| black_box(one) < black_box(two))); } fn u512_ord(c: &mut Criterion) { @@ -601,7 +601,7 @@ fn u512_ord(c: &mut Criterion) { 36306297304129857, 3453, ]); - c.bench_function("u512_ord", move |b| b.iter(|| black_box(one < two))); + c.bench_function("u512_ord", move |b| b.iter(|| black_box(one) < black_box(two))); } fn u256_from_le(c: &mut Criterion) { diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 2811d7d6c..fc1391e90 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1464,15 +1464,7 @@ macro_rules! 
construct_uint { impl $crate::core_::cmp::Ord for $name { fn cmp(&self, other: &$name) -> $crate::core_::cmp::Ordering { - let &$name(ref me) = self; - let &$name(ref you) = other; - let mut i = $n_words; - while i > 0 { - i -= 1; - if me[i] < you[i] { return $crate::core_::cmp::Ordering::Less; } - if me[i] > you[i] { return $crate::core_::cmp::Ordering::Greater; } - } - $crate::core_::cmp::Ordering::Equal + self.as_ref().iter().rev().cmp(other.as_ref().iter().rev()) } } From 8e121591acd63e7def9146fbf6bee0693fee3450 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 18 May 2020 13:05:26 +0200 Subject: [PATCH 128/359] uint: manually implement `Eq` and `Hash` (#390) * uint: manually implement some traits * uint: remove unused attr and add Eq tests * uint: fix formatting * uint: add a comment why a manual impl is used * uint: simply Eq impl * uint: simplify Hash and add a regression test * uint: fmt and changelog update --- uint/CHANGELOG.md | 1 + uint/src/uint.rs | 19 ++++++++++++++++++- uint/tests/uint_tests.rs | 29 ++++++++++++++++++++++++++--- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 386475612..3445abef3 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Added a manual impl of `Eq` and `Hash`. [#390](https://github.com/paritytech/parity-common/pull/390) ## [0.8.3] - 2020-04-27 - Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index fc1391e90..25419a7b0 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -445,7 +445,7 @@ macro_rules! 
construct_uint { /// Little-endian large integer type #[repr(C)] $(#[$attr])* - #[derive(Copy, Clone, Eq, PartialEq, Hash)] + #[derive(Copy, Clone)] $visibility struct $name (pub [u64; $n_words]); /// Get a reference to the underlying little-endian words. @@ -1462,6 +1462,23 @@ macro_rules! construct_uint { } } + // We implement `Eq` and `Hash` manually to workaround + // https://github.com/rust-lang/rust/issues/61415 + impl $crate::core_::cmp::PartialEq for $name { + fn eq(&self, other: &$name) -> bool { + self.as_ref() == other.as_ref() + } + } + + impl $crate::core_::cmp::Eq for $name {} + + impl $crate::core_::hash::Hash for $name { + fn hash(&self, state: &mut H) { + // use the impl as slice &[u64] + self.as_ref().hash(state); + } + } + impl $crate::core_::cmp::Ord for $name { fn cmp(&self, other: &$name) -> $crate::core_::cmp::Ordering { self.as_ref().iter().rev().cmp(other.as_ref().iter().rev()) diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 32a14c728..f01051586 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -20,6 +20,26 @@ construct_uint! 
{ pub struct U512(8); } +#[cfg(feature = "std")] +#[test] +fn hash_impl_is_the_same_as_for_a_slice() { + use core::hash::{Hash, Hasher as _}; + use std::collections::hash_map::DefaultHasher; + + let uint_hash = { + let mut h = DefaultHasher::new(); + let uint = U256::from(123u64); + Hash::hash(&uint, &mut h); + h.finish() + }; + let slice_hash = { + let mut h = DefaultHasher::new(); + Hash::hash(&[123u64, 0, 0, 0], &mut h); + h.finish() + }; + assert_eq!(uint_hash, slice_hash); +} + #[test] fn u128_conversions() { let mut a = U256::from(u128::max_value()); @@ -281,7 +301,6 @@ fn uint256_bits_test() { } #[test] -#[cfg_attr(feature = "dev", allow(eq_op))] fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -296,6 +315,10 @@ fn uint256_comp_test() { assert!(bigger >= big); assert!(bigger >= small); assert!(small <= small); + assert_eq!(small, small); + assert_eq!(biggest, biggest); + assert_ne!(big, biggest); + assert_ne!(big, bigger); } #[test] @@ -1010,10 +1033,10 @@ fn into_fixed_array() { fn test_u256_from_fixed_array() { let ary = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 123]; let num: U256 = ary.into(); - assert_eq!(num, U256::from(std::u64::MAX) + 1 + 123); + assert_eq!(num, U256::from(core::u64::MAX) + 1 + 123); let a_ref: &U256 = &ary.into(); - assert_eq!(a_ref, &(U256::from(std::u64::MAX) + 1 + 123)); + assert_eq!(a_ref, &(U256::from(core::u64::MAX) + 1 + 123)); } #[test] From 3af0cb2c375f1f04c3d1097b97b6193d63277b80 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 8 Jun 2020 17:33:20 +0200 Subject: [PATCH 129/359] kvdb-rocksdb: call iter_with_prefix directly on cfs in write (#397) --- kvdb-rocksdb/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 2b1fd362a..2be3841c1 100644 --- a/kvdb-rocksdb/src/lib.rs +++ 
b/kvdb-rocksdb/src/lib.rs @@ -507,8 +507,13 @@ impl Database { let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); batch.delete_range_cf(cf, &prefix[..], &end_range[..]); if no_end { + use crate::iter::IterationHandler as _; + let prefix = if prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] }; - for (key, _) in self.iter_with_prefix(col, prefix) { + // We call `iter_with_prefix` directly on `cfs` to avoid taking a lock twice + // See https://github.com/paritytech/parity-common/pull/396. + let read_opts = generate_read_options(); + for (key, _) in cfs.iter_with_prefix(col, prefix, read_opts) { batch.delete_cf(cf, &key[..]); } } From f3023c913cf5eeb9dbac87672101071dced8e931 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 17 Jun 2020 22:59:59 +0200 Subject: [PATCH 130/359] parity-util-mem: optimize MallocSizeOf for flat collections (#398) * parity-util-mem: optimize MallocSizeOf for flat collections * parity-util-mem: obey Nikolay's suggestion to use fold * parity-util-mem: implement size_of_is_zero for more types * parity-util-mem: generalize 0 size to constant size --- parity-util-mem/src/malloc_size.rs | 171 ++++++++++++++++++++++------- 1 file changed, 130 insertions(+), 41 deletions(-) diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index c68f8f660..476ea9d66 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -70,6 +70,7 @@ pub use alloc::boxed::Box; use core::ffi::c_void; #[cfg(feature = "std")] use rstd::hash::Hash; +use rstd::marker::PhantomData; use rstd::mem::size_of; use rstd::ops::Range; use rstd::ops::{Deref, DerefMut}; @@ -161,7 +162,18 @@ impl MallocSizeOfOps { pub trait MallocSizeOf { /// Measure the heap usage of all descendant heap-allocated structures, but /// not the space taken up by the value itself. + /// If `T::size_of` is a constant, consider implementing `constant_size` as well. 
fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; + + /// Used to optimize `MallocSizeOf` implementation for collections + /// like `Vec` and `HashMap` to avoid iterating over them unnecessarily. + /// The `Self: Sized` bound is for object safety. + fn constant_size() -> Option + where + Self: Sized, + { + None + } } /// Trait for measuring the "shallow" heap usage of a container. @@ -253,6 +265,9 @@ impl<'a, T: ?Sized> MallocSizeOf for &'a T { // Zero makes sense for a non-owning reference. 0 } + fn constant_size() -> Option { + Some(0) + } } impl MallocSizeOf for Box { @@ -268,6 +283,11 @@ impl MallocSizeOf for Tuple { for_tuples!( #( result += Tuple.size_of(ops); )* ); result } + fn constant_size() -> Option { + let mut result = Some(0); + for_tuples!( #( result = result.and_then(|s| Tuple::constant_size().map(|t| s + t)); )* ); + result + } } impl MallocSizeOf for Option { @@ -278,6 +298,9 @@ impl MallocSizeOf for Option { 0 } } + fn constant_size() -> Option { + T::constant_size().filter(|s| *s == 0) + } } impl MallocSizeOf for Result { @@ -287,18 +310,28 @@ impl MallocSizeOf for Result { Err(ref e) => e.size_of(ops), } } + fn constant_size() -> Option { + // Result has constant size iff T::constant_size == E::constant_size + T::constant_size().and_then(|t| E::constant_size().filter(|e| *e == t)) + } } impl MallocSizeOf for rstd::cell::Cell { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { self.get().size_of(ops) } + fn constant_size() -> Option { + T::constant_size() + } } impl MallocSizeOf for rstd::cell::RefCell { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { self.borrow().size_of(ops) } + fn constant_size() -> Option { + T::constant_size() + } } #[cfg(feature = "std")] @@ -317,8 +350,10 @@ where impl MallocSizeOf for [T] { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = 0; - for elem in self.iter() { - n += elem.size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = 
self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) } n } @@ -327,8 +362,10 @@ impl MallocSizeOf for [T] { impl MallocSizeOf for Vec { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for elem in self.iter() { - n += elem.size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) } n } @@ -354,8 +391,10 @@ impl MallocShallowSizeOf for rstd::collections::VecDeque { impl MallocSizeOf for rstd::collections::VecDeque { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for elem in self.iter() { - n += elem.size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) } n } @@ -389,8 +428,10 @@ where { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for t in self.iter() { - n += t.size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) } n } @@ -400,6 +441,9 @@ impl MallocSizeOf for rstd::cmp::Reverse { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { self.0.size_of(ops) } + fn constant_size() -> Option { + I::constant_size() + } } #[cfg(feature = "std")] @@ -422,9 +466,10 @@ where { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) } n } @@ -447,9 +492,10 @@ where { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); + if 
let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) } n } @@ -473,20 +519,15 @@ where { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for k in self.iter() { - n += k.size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) } n } } -// PhantomData is always 0. -impl MallocSizeOf for rstd::marker::PhantomData { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 0 - } -} - // XXX: we don't want MallocSizeOf to be defined for Rc and Arc. If negative // trait bounds are ever allowed, this code should be uncommented. // (We do have a compile-fail test for this: @@ -604,27 +645,33 @@ macro_rules! malloc_size_of_is_0( fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { 0 } + #[inline(always)] + fn constant_size() -> Option { Some(0) } } )+ ); (any: $($ty:ident<$($gen:ident),+>),+) => ( $( - impl<$($gen),+> $crate::MallocSizeOf for $ty<$($gen),+> { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 + impl<$($gen),+> $crate::MallocSizeOf for $ty<$($gen),+> { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + #[inline(always)] + fn constant_size() -> Option { Some(0) } } - } )+ ); ($($ty:ident<$($gen:ident),+>),+) => ( $( - impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 + impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { + #[inline(always)] + fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { + 0 + } + #[inline(always)] + fn constant_size() -> Option { Some(0) } } - } )+ ); ); @@ -641,6 +688,7 @@ 
malloc_size_of_is_0!(rstd::sync::atomic::AtomicUsize); malloc_size_of_is_0!(Range, Range, Range, Range, Range); malloc_size_of_is_0!(Range, Range, Range, Range, Range); malloc_size_of_is_0!(Range, Range); +malloc_size_of_is_0!(any: PhantomData); /// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a /// struct. @@ -681,9 +729,10 @@ where { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.shallow_size_of(ops); - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) } n } @@ -698,9 +747,10 @@ where { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = 0; - for (k, v) in self.iter() { - n += k.size_of(ops); - n += v.size_of(ops); + if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { + n += self.len() * (k + v) + } else { + n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) } n } @@ -721,8 +771,10 @@ macro_rules! 
impl_smallvec { { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = if self.spilled() { self.capacity() * core::mem::size_of::() } else { 0 }; - for elem in self.iter() { - n += elem.size_of(ops); + if let Some(t) = T::constant_size() { + n += self.len() * t; + } else { + n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) } n } @@ -795,6 +847,15 @@ mod tests { assert!(v.size_of(&mut ops) >= expected_min_allocs); } + #[test] + fn test_large_vec() { + const N: usize = 128 * 1024 * 1024; + let val = vec![1u8; N]; + let mut ops = new_malloc_size_ops(); + assert!(val.size_of(&mut ops) >= N); + assert!(val.size_of(&mut ops) < 2 * N); + } + #[test] fn btree_set() { let mut set = BTreeSet::new(); @@ -816,4 +877,32 @@ mod tests { // MallocSizeOf is not implemented for [u8; 333] assert_eq!(crate::malloc_size(&Data::<[u8; 333]> { phantom: std::marker::PhantomData }), 0); } + + #[test] + fn constant_size() { + struct AlwaysTwo(Vec); + + impl MallocSizeOf for AlwaysTwo { + fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { + self.0.size_of(ops) + } + fn constant_size() -> Option { + Some(2) + } + } + + assert_eq!(AlwaysTwo::constant_size(), Some(2)); + assert_eq!(std::cmp::Reverse::::constant_size(), Some(0)); + assert_eq!(std::cell::RefCell::::constant_size(), Some(0)); + assert_eq!(std::cell::Cell::::constant_size(), Some(0)); + assert_eq!(Result::<(), ()>::constant_size(), Some(0)); + assert_eq!(<(AlwaysTwo, (), [u8; 32], AlwaysTwo)>::constant_size(), Some(2 + 2)); + assert_eq!(Option::::constant_size(), Some(0)); + assert_eq!(<&String>::constant_size(), Some(0)); + + assert_eq!(::constant_size(), None); + assert_eq!(std::borrow::Cow::::constant_size(), None); + assert_eq!(Result::<(), String>::constant_size(), None); + assert_eq!(Option::::constant_size(), None); + } } From 03e3d3466fd0108f8004b8d2e59e29aad90e7d19 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 19 Jun 2020 15:11:23 +0200 Subject: [PATCH 131/359] Box secret memory (#400) Put 
`Secret` memory on heap. --- parity-crypto/CHANGELOG.md | 5 ++ parity-crypto/Cargo.toml | 2 +- parity-crypto/src/publickey/ec_math_utils.rs | 4 +- parity-crypto/src/publickey/ecdh.rs | 3 +- parity-crypto/src/publickey/extended_keys.rs | 23 +++--- parity-crypto/src/publickey/keypair.rs | 8 ++- parity-crypto/src/publickey/mod.rs | 2 +- parity-crypto/src/publickey/secret_key.rs | 75 ++++++++++++++++---- 8 files changed, 94 insertions(+), 28 deletions(-) diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 08cd46532..893082e82 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.6.2] - 2020-06-19 +- Put `Secret` memory on heap. [#400](https://github.com/paritytech/parity-common/pull/400) +- Add `copy_from_str` conversion methods for `Secret`. +- Deprecate `From<&str>` in favor of `copy_from_str`. + ## [0.6.1] - 2020-04-11 - Add `recover_allowing_all_zero_message()` and `ZeroesAllowedMessage` to accomodate ethereum's `ecrecover` builtin. [#369](https://github.com/paritytech/parity-common/pull/369) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 1b1944ad3..74f564d7f 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.6.1" +version = "0.6.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." 
diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs index 5ea82a78a..d9dd7e2e4 100644 --- a/parity-crypto/src/publickey/ec_math_utils.rs +++ b/parity-crypto/src/publickey/ec_math_utils.rs @@ -93,7 +93,6 @@ fn set_public(public: &mut Public, key_public: &key::PublicKey) { mod tests { use super::super::{Generator, Random, Secret}; use super::{generation_point, public_add, public_mul_secret, public_negate, public_sub}; - use std::str::FromStr; #[test] fn public_addition_is_commutative() { @@ -140,7 +139,8 @@ mod tests { #[test] fn public_multiplication_verification() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let mut public = generation_point(); public_mul_secret(&mut public, &secret).unwrap(); assert_eq!(format!("{:x}", public), "8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4"); diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs index 93a43bed8..f5890a207 100644 --- a/parity-crypto/src/publickey/ecdh.rs +++ b/parity-crypto/src/publickey/ecdh.rs @@ -34,7 +34,8 @@ mod tests { #[test] fn test_agree() { // Just some random values for secret/public to check we agree with previous implementation. 
- let secret = Secret::from_str("01a400760945613ff6a46383b250bf27493bfe679f05274916182776f09b28f1").unwrap(); + let secret = + Secret::copy_from_str(&"01a400760945613ff6a46383b250bf27493bfe679f05274916182776f09b28f1").unwrap(); let public= Public::from_str("e37f3cbb0d0601dc930b8d8aa56910dd5629f2a0979cc742418960573efc5c0ff96bc87f104337d8c6ab37e597d4f9ffbd57302bc98a825519f691b378ce13f5").unwrap(); let shared = agree(&secret, &public); diff --git a/parity-crypto/src/publickey/extended_keys.rs b/parity-crypto/src/publickey/extended_keys.rs index be682db39..d83aa6a37 100644 --- a/parity-crypto/src/publickey/extended_keys.rs +++ b/parity-crypto/src/publickey/extended_keys.rs @@ -15,6 +15,7 @@ pub use self::derivation::Error as DerivationError; use super::{Public, Secret}; use ethereum_types::H256; +use zeroize::Zeroize; /// Represents label that can be stored as a part of key derivation pub trait Label { @@ -96,11 +97,13 @@ impl ExtendedSecret { where T: Label, { - let (derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index); + let (mut derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index); - let derived_secret = Secret::from(derived_key.0); + let new_derived_secret = Secret::from(derived_key.0); - ExtendedSecret::with_code(derived_secret, next_chain_code) + derived_key.0.zeroize(); + + ExtendedSecret::with_code(new_derived_secret, next_chain_code) } /// Private key component of the extended key. 
@@ -401,7 +404,8 @@ mod tests { #[test] fn smoky() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::zero()); // hardened @@ -437,7 +441,7 @@ mod tests { ); let keypair = ExtendedKeyPair::with_secret( - Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(), + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(), H256::from_low_u64_be(64), ); assert_eq!( @@ -448,7 +452,8 @@ mod tests { #[test] fn h256_soft_match() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); @@ -468,7 +473,8 @@ mod tests { #[test] fn h256_hard() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let derivation_secret = H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); @@ -481,7 +487,8 @@ mod tests { #[test] fn test_key_derivation() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); let extended_public 
= ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs index f4ac8b287..330316012 100644 --- a/parity-crypto/src/publickey/keypair.rs +++ b/parity-crypto/src/publickey/keypair.rs @@ -58,6 +58,7 @@ impl KeyPair { } /// Copies a pair from another one + #[inline(always)] pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self { let serialized = publ.serialize_uncompressed(); let secret = Secret::from(sec); @@ -86,11 +87,11 @@ impl KeyPair { #[cfg(test)] mod tests { use super::{KeyPair, Secret}; - use std::str::FromStr; #[test] fn from_secret() { - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let _ = KeyPair::from_secret(secret).unwrap(); } @@ -100,7 +101,8 @@ mod tests { "secret: a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65 public: 8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4 address: 5b073e9233944b5e729e46d618f0d8edf3d9c34a".to_owned(); - let secret = Secret::from_str("a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); + let secret = + Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); let kp = KeyPair::from_secret(secret).unwrap(); assert_eq!(format!("{}", kp), expected); } diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index 54d3ffe79..ec62012f3 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -27,7 +27,7 @@ pub use self::error::Error; pub use self::extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; pub use self::keypair::{public_to_address, 
KeyPair}; pub use self::keypair_generator::Random; -pub use self::secret_key::Secret; +pub use self::secret_key::{Secret, ZeroizeSecretKey}; use ethereum_types::H256; use lazy_static::lazy_static; diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs index ac938a674..269afdf3e 100644 --- a/parity-crypto/src/publickey/secret_key.rs +++ b/parity-crypto/src/publickey/secret_key.rs @@ -23,7 +23,7 @@ use crate::publickey::Error; /// Represents secret key #[derive(Clone, PartialEq, Eq)] pub struct Secret { - inner: H256, + inner: Box, } impl Drop for Secret { @@ -52,21 +52,32 @@ impl fmt::Display for Secret { impl Secret { /// Creates a `Secret` from the given slice, returning `None` if the slice length != 32. + /// Caller is responsible to zeroize input slice. pub fn copy_from_slice(key: &[u8]) -> Option { if key.len() != 32 { return None; } let mut h = H256::zero(); h.as_bytes_mut().copy_from_slice(&key[0..32]); - Some(Secret { inner: h }) + Some(Secret { inner: Box::new(h) }) + } + + /// Creates a `Secret` from the given `str` representation, + /// returning an error for hex big endian representation of + /// the secret. + /// Caller is responsible to zeroize input slice. + pub fn copy_from_str(s: &str) -> Result { + let h = H256::from_str(s).map_err(|e| Error::Custom(format!("{:?}", e)))?; + Ok(Secret { inner: Box::new(h) }) } /// Creates zero key, which is invalid for crypto operations, but valid for math operation. pub fn zero() -> Self { - Secret { inner: H256::zero() } + Secret { inner: Box::new(H256::zero()) } } /// Imports and validates the key. + /// Caller is responsible to zeroize input slice. 
pub fn import_key(key: &[u8]) -> Result { let secret = key::SecretKey::from_slice(key)?; Ok(secret.into()) @@ -79,7 +90,7 @@ impl Secret { /// Wrapper over hex conversion pub fn to_hex(&self) -> String { - format!("{:x}", self.inner) + format!("{:x}", self.inner.deref()) } /// Inplace add one secret key to another (scalar + scalar) @@ -94,8 +105,9 @@ impl Secret { let mut key_secret = self.to_secp256k1_secret()?; let other_secret = other.to_secp256k1_secret()?; key_secret.add_assign(&other_secret[..])?; - *self = key_secret.into(); + ZeroizeSecretKey(other_secret).zeroize(); + Ok(()) } } @@ -116,6 +128,7 @@ impl Secret { key_secret.add_assign(&other_secret[..])?; *self = key_secret.into(); + ZeroizeSecretKey(other_secret).zeroize(); Ok(()) } } @@ -153,6 +166,7 @@ impl Secret { key_secret.mul_assign(&other_secret[..])?; *self = key_secret.into(); + ZeroizeSecretKey(other_secret).zeroize(); Ok(()) } } @@ -193,11 +207,13 @@ impl Secret { } /// Create a `secp256k1::key::SecretKey` based on this secret. + /// Warning the resulting secret key need to be zeroized manually. 
pub fn to_secp256k1_secret(&self) -> Result { key::SecretKey::from_slice(&self[..]).map_err(Into::into) } } +#[deprecated(since = "0.6.2", note = "please use `copy_from_str` instead, input is not zeroized")] impl FromStr for Secret { type Err = Error; fn from_str(s: &str) -> Result { @@ -206,17 +222,24 @@ impl FromStr for Secret { } impl From<[u8; 32]> for Secret { - fn from(k: [u8; 32]) -> Self { - Secret { inner: H256(k) } + #[inline(always)] + fn from(mut k: [u8; 32]) -> Self { + let result = Secret { inner: Box::new(H256(k)) }; + k.zeroize(); + result } } impl From for Secret { - fn from(s: H256) -> Self { - s.0.into() + #[inline(always)] + fn from(mut s: H256) -> Self { + let result = s.0.into(); + s.0.zeroize(); + result } } +#[deprecated(since = "0.6.2", note = "please use `copy_from_str` instead, input is not zeroized")] impl TryFrom<&str> for Secret { type Error = Error; @@ -225,6 +248,7 @@ impl TryFrom<&str> for Secret { } } +#[deprecated(since = "0.6.2", note = "please use `copy_from_slice` instead, input is not zeroized")] impl TryFrom<&[u8]> for Secret { type Error = Error; @@ -232,14 +256,16 @@ impl TryFrom<&[u8]> for Secret { if b.len() != SECP256K1_SECRET_KEY_SIZE { return Err(Error::InvalidSecretKey); } - Ok(Self { inner: H256::from_slice(b) }) + Ok(Self { inner: Box::new(H256::from_slice(b)) }) } } impl From for Secret { + #[inline(always)] fn from(key: key::SecretKey) -> Self { let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; a.copy_from_slice(&key[0..SECP256K1_SECRET_KEY_SIZE]); + ZeroizeSecretKey(key).zeroize(); a.into() } } @@ -252,11 +278,33 @@ impl Deref for Secret { } } +/// A wrapper type around `SecretKey` to prevent leaking secret key data. This +/// type will properly zeroize the secret key to `ONE_KEY` in a way that will +/// not get optimized away by the compiler nor be prone to leaks that take +/// advantage of access reordering. 
+#[derive(Clone, Copy)] +pub struct ZeroizeSecretKey(pub secp256k1::SecretKey); + +impl Default for ZeroizeSecretKey { + fn default() -> Self { + ZeroizeSecretKey(secp256k1::key::ONE_KEY) + } +} + +impl std::ops::Deref for ZeroizeSecretKey { + type Target = secp256k1::SecretKey; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl zeroize::DefaultIsZeroes for ZeroizeSecretKey {} + #[cfg(test)] mod tests { use super::super::{Generator, Random}; use super::Secret; - use std::str::FromStr; #[test] fn secret_pow() { @@ -264,7 +312,10 @@ mod tests { let mut pow0 = secret.clone(); pow0.pow(0).unwrap(); - assert_eq!(pow0, Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap()); + assert_eq!( + pow0, + Secret::copy_from_str(&"0000000000000000000000000000000000000000000000000000000000000001").unwrap() + ); let mut pow1 = secret.clone(); pow1.pow(1).unwrap(); From cf5373100e58b045a8360cde01998c1ddd8cdbde Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 19 Jun 2020 22:03:37 +0200 Subject: [PATCH 132/359] parity-util-mem: export MallocShallowSizeOf (#399) * parity-util-mem: export MallocShallowSizeOf * parity-util-mem: cargo fmt --- parity-util-mem/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 528f0f668..c1add51a8 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -64,7 +64,7 @@ pub mod ethereum_impls; pub mod primitives_impls; pub use allocators::MallocSizeOfExt; -pub use malloc_size::{MallocSizeOf, MallocSizeOfOps}; +pub use malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps}; pub use parity_util_mem_derive::*; From 7855f99fab80d0e754013e8ab249242cd27e53d0 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 23 Jun 2020 10:49:34 +0200 Subject: [PATCH 133/359] primitive-types: scale-info support (#312) * primitive-types: type-metadata support * primitive-types: cargo fmt * switch to 
scale-info * primitive-types: use scale-info from crates.io * ci: remove duplicate test * primitive-types: fix compilation for test * primitive-types: fmt * primitive-types: update scale-info to 0.2 --- primitive-types/Cargo.toml | 5 +++++ primitive-types/src/lib.rs | 8 ++++++++ primitive-types/tests/scale_info.rs | 28 ++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+) create mode 100644 primitive-types/tests/scale_info.rs diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index d48bde3d2..30ba14ec2 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -13,6 +13,7 @@ uint = { version = "0.8.3", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } +scale-info = { version = "0.2", features = ["derive"], default-features = false, optional = true } [features] default = ["std"] @@ -24,3 +25,7 @@ serde_no_std = ["impl-serde"] codec = ["impl-codec"] rlp = ["impl-rlp"] arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] + +[[test]] +name = "scale_info" +required-features = ["scale-info"] diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index 0b4af97c8..ad5c48045 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -16,6 +16,8 @@ use core::convert::TryFrom; use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; +#[cfg(feature = "scale-info")] +use scale_info::TypeInfo; use uint::{construct_uint, uint_full_mul_reg}; /// Error type for conversion. @@ -27,27 +29,33 @@ pub enum Error { construct_uint! { /// 128-bit unsigned integer. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct U128(2); } construct_uint! { /// 256-bit unsigned integer. 
+ #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct U256(4); } construct_uint! { /// 512-bits unsigned integer. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct U512(8); } construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 20 bytes (160 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H160(20); } construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 32 bytes (256 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H256(32); } construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 64 bytes (512 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H512(64); } diff --git a/primitive-types/tests/scale_info.rs b/primitive-types/tests/scale_info.rs new file mode 100644 index 000000000..5a61ef133 --- /dev/null +++ b/primitive-types/tests/scale_info.rs @@ -0,0 +1,28 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tests for scale-info feature of primitive-types. 
+ +use primitive_types::{H256, U256}; +use scale_info::{build::Fields, Path, Type, TypeInfo}; + +#[test] +fn u256_scale_info() { + let r#type = + Type::builder().path(Path::new("U256", "primitive_types")).composite(Fields::unnamed().field_of::<[u64; 4]>()); + + assert_eq!(U256::type_info(), r#type.into()); +} + +#[test] +fn h256_scale_info() { + let r#type = + Type::builder().path(Path::new("H256", "primitive_types")).composite(Fields::unnamed().field_of::<[u8; 32]>()); + + assert_eq!(H256::type_info(), r#type.into()); +} From 1d08c667681126e7792819f8ae198dee5b334675 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 24 Jun 2020 10:42:14 +0200 Subject: [PATCH 134/359] parity-util-mem: bump version and deps and update changelog (#402) parity-util-mem: bump version and deps and update changelog --- kvdb-memorydb/CHANGELOG.md | 3 +++ kvdb-memorydb/Cargo.toml | 8 ++++---- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 8 ++++---- kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb-web/Cargo.toml | 10 +++++----- kvdb/CHANGELOG.md | 3 +++ kvdb/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 5 +++++ parity-util-mem/Cargo.toml | 6 +++--- 10 files changed, 34 insertions(+), 20 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 94f297a2d..89b2a4ed0 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2020-06-24 +- Updated `kvdb` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) + ## [0.6.0] - 2020-05-05 ### Breaking - Updated to the new `kvdb` interface. 
[#313](https://github.com/paritytech/parity-common/pull/313) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 7104c0c25..1fc78291e 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,9 +8,9 @@ license = "MIT OR Apache-2.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false, features = ["std"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false, features = ["std"] } parking_lot = "0.10.0" -kvdb = { version = "0.6", path = "../kvdb" } +kvdb = { version = "0.7", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.4" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 2c1cd2813..5f157347f 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2020-06-24 +- Updated `kvdb` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) + ## [0.8.0] - 2020-05-05 - Updated RocksDB to 6.7.3. 
[#379](https://github.com/paritytech/parity-common/pull/379) ### Breaking diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 235d3bd1d..da466c469 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,20 +14,20 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.4" -kvdb = { path = "../kvdb", version = "0.6" } +kvdb = { path = "../kvdb", version = "0.7" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.10.0" regex = "1.3.1" rocksdb = { version = "0.14", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.4" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } rand = "0.7.2" tempdir = "0.3.7" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index e2eb7647e..4e531d5fc 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "kvdb-shared-tests" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.6" } +kvdb = { path = "../kvdb", version = "0.7" } diff --git a/kvdb-web/Cargo.toml 
b/kvdb-web/Cargo.toml index e6e7292ee..09c26b7d8 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.54" js-sys = "0.3.31" -kvdb = { version = "0.6", path = "../kvdb" } -kvdb-memorydb = { version = "0.6", path = "../kvdb-memorydb" } +kvdb = { version = "0.7", path = "../kvdb" } +kvdb-memorydb = { version = "0.7", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" send_wrapper = "0.3.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false } [dependencies.web-sys] version = "0.3.31" @@ -40,6 +40,6 @@ features = [ [dev-dependencies] console_log = "0.1.2" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.4" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } wasm-bindgen-test = "0.3.4" wasm-bindgen-futures = "0.4.4" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 0ce19f17f..e67725cd7 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2020-06-24 +- Updated `parity-util-mem` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) + ## [0.6.0] - 2020-05-05 ### Breaking - Removed `write_buffered` and `flush` methods. 
[#313](https://github.com/paritytech/parity-common/pull/313) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 7638955ef..1accfb38b 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -9,4 +9,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.6", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index a23c2164d..f6ff26a44 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2020-06-24 +- Added `const_size` to `MallocSizeOf` to optimize it for flat collections. [#398](https://github.com/paritytech/parity-common/pull/398) +- Exported `MallocShallowSizeOf`. [#399](https://github.com/paritytech/parity-common/pull/399) +- Updated dependencies. + ## [0.6.1] - 2020-04-15 - Fix compilation on Windows for no-std. [#375](https://github.com/paritytech/parity-common/pull/375) - Prevent multiple versions from being linked into the same program. 
[#363](https://github.com/paritytech/parity-common/pull/363) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 97e20cf0e..91128228a 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.6.1" +version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -18,8 +18,8 @@ build = "build.rs" cfg-if = "0.1.10" dlmalloc = { version = "0.1.3", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } -lru = { version = "0.4", optional = true } -hashbrown = { version = "0.6", optional = true } +lru = { version = "0.5", optional = true } +hashbrown = { version = "0.8", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } From de6545d66fdac950259b1d54128c60c89b5cdeb2 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 6 Jul 2020 15:39:01 +0200 Subject: [PATCH 135/359] Changelog to publish kvdb-web 0.7.0 (#404) * prepare publish * update date * missing pr number * Update kvdb-web/CHANGELOG.md Co-authored-by: Andronik Ordian --- kvdb-web/CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 128934771..64dd9ba34 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2020-07-06 +- Updated `kvdb` to 0.7.0 [#404](https://github.com/paritytech/parity-common/pull/404) + ## [0.6.0] - 2020-05-05 ### Breaking - Updated to the new `kvdb` interface. 
[#313](https://github.com/paritytech/parity-common/pull/313) From e4751ed882f50530ef3d1bcdeac1899fd17b3bb2 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Thu, 16 Jul 2020 11:07:58 +0300 Subject: [PATCH 136/359] Port runtime to tokio-compat (#403) * Port runtime to tokio-compat * Accept legacy IntoFutures --- runtime/Cargo.toml | 6 ++- runtime/examples/simple.rs | 23 ++++------- runtime/src/lib.rs | 79 +++++++++++++++++++++++--------------- 3 files changed, 60 insertions(+), 48 deletions(-) diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index ca53759a0..3a5867e8f 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -12,8 +12,10 @@ keywords = ["parity", "runtime", "tokio"] include = ["Cargo.toml", "src/**/*.rs", "README.md", "CHANGELOG.md"] [dependencies] -futures = "0.1" -tokio = "0.1.22" +futures = { version = "0.3", default-features = false, features = ["compat"] } +futures01 = { package = "futures", version = "0.1" } +tokio-compat = "0.1" +tokio = { version = "0.2", features = ["full"] } [features] test-helpers = [] \ No newline at end of file diff --git a/runtime/examples/simple.rs b/runtime/examples/simple.rs index 6448bcbf6..c037f74b7 100644 --- a/runtime/examples/simple.rs +++ b/runtime/examples/simple.rs @@ -16,26 +16,19 @@ //! Simple example, illustating usage of runtime wrapper. 
-use futures::{Future, Stream}; use parity_runtime::Runtime; -use std::thread::park_timeout; -use std::time::Duration; -use tokio::fs::read_dir; +use std::{thread::park_timeout, time::Duration}; +use tokio::{fs::read_dir, stream::*}; /// Read current directory in a future, which is executed in the created runtime fn main() { - let fut = read_dir(".") - .flatten_stream() - .for_each(|dir| { - println!("{:?}", dir.path()); - Ok(()) - }) - .map_err(|err| { - eprintln!("Error: {:?}", err); - () - }); let runtime = Runtime::with_default_thread_count(); - runtime.executor().spawn(fut); + runtime.executor().spawn_std(async move { + let mut dirs = read_dir(".").await.unwrap(); + while let Some(dir) = dirs.try_next().await.expect("Error") { + println!("{:?}", dir.path()); + } + }); let timeout = Duration::from_secs(3); park_timeout(timeout); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index acb1e4b6e..9284454be 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -16,11 +16,10 @@ //! Tokio Runtime wrapper. -use futures::{future, Future, IntoFuture}; -use std::sync::mpsc; -use std::{fmt, thread}; -pub use tokio::runtime::{Builder as TokioRuntimeBuilder, Runtime as TokioRuntime, TaskExecutor}; -pub use tokio::timer::Delay; +use futures::compat::*; +use futures01::{Future as Future01, IntoFuture as IntoFuture01}; +use std::{fmt, future::Future, thread}; +pub use tokio_compat::runtime::{Builder as TokioRuntimeBuilder, Runtime as TokioRuntime, TaskExecutor}; /// Runtime for futures. 
/// @@ -30,19 +29,21 @@ pub struct Runtime { handle: RuntimeHandle, } +const RUNTIME_BUILD_PROOF: &str = + "Building a Tokio runtime will only fail when mio components cannot be initialized (catastrophic)"; + impl Runtime { fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self { - let mut runtime = runtime_bldr.build().expect( - "Building a Tokio runtime will only fail when mio components \ - cannot be initialized (catastrophic)", - ); - let (stop, stopped) = futures::oneshot(); - let (tx, rx) = mpsc::channel(); + let mut runtime = runtime_bldr.build().expect(RUNTIME_BUILD_PROOF); + + let (stop, stopped) = tokio::sync::oneshot::channel(); + let (tx, rx) = std::sync::mpsc::channel(); let handle = thread::spawn(move || { - tx.send(runtime.executor()).expect("Rx is blocking upper thread."); - runtime - .block_on(futures::empty().select(stopped).map(|_| ()).map_err(|_| ())) - .expect("Tokio runtime should not have unhandled errors."); + let executor = runtime.executor(); + runtime.block_on_std(async move { + tx.send(executor).expect("Rx is blocking upper thread."); + let _ = stopped.await; + }); }); let executor = rx.recv().expect("tx is transfered to a newly spawned thread."); @@ -110,6 +111,10 @@ impl fmt::Debug for Mode { } } +fn block_on + Send + 'static>(r: F) { + tokio::runtime::Builder::new().enable_all().basic_scheduler().build().expect(RUNTIME_BUILD_PROOF).block_on(r) +} + #[derive(Debug, Clone)] pub struct Executor { inner: Mode, @@ -128,37 +133,49 @@ impl Executor { Executor { inner: Mode::ThreadPerFuture } } - /// Spawn a future on this runtime + /// Spawn a legacy future on this runtime pub fn spawn(&self, r: R) where - R: IntoFuture + Send + 'static, + R: IntoFuture01 + Send + 'static, R::Future: Send + 'static, { - match self.inner { - Mode::Tokio(ref executor) => executor.spawn(r.into_future()), - Mode::Sync => { - let _ = r.into_future().wait(); + self.spawn_std(async move { + let _ = r.into_future().compat().await; + }) + } + + /// Spawn an std 
future on this runtime + pub fn spawn_std(&self, r: R) + where + R: Future + Send + 'static, + { + match &self.inner { + Mode::Tokio(executor) => { + let _ = executor.spawn_handle_std(r); } + Mode::Sync => block_on(r), Mode::ThreadPerFuture => { - thread::spawn(move || { - let _ = r.into_future().wait(); - }); + thread::spawn(move || block_on(r)); } } } } -impl + Send + 'static> future::Executor for Executor { - fn execute(&self, future: F) -> Result<(), future::ExecuteError> { - match self.inner { - Mode::Tokio(ref executor) => executor.execute(future), +impl + Send + 'static> futures01::future::Executor for Executor { + fn execute(&self, future: F) -> Result<(), futures01::future::ExecuteError> { + match &self.inner { + Mode::Tokio(executor) => executor.execute(future), Mode::Sync => { - let _ = future.wait(); + block_on(async move { + let _ = future.compat().await; + }); Ok(()) } Mode::ThreadPerFuture => { thread::spawn(move || { - let _ = future.wait(); + block_on(async move { + let _ = future.compat().await; + }) }); Ok(()) } @@ -168,7 +185,7 @@ impl + Send + 'static> future::Executor for /// A handle to a runtime. Dropping the handle will cause runtime to shutdown. pub struct RuntimeHandle { - close: Option>, + close: Option>, handle: Option>, } From bbdd0899c934176f1083469e6618f5bb4bb5fc91 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 17 Jul 2020 17:33:24 +0200 Subject: [PATCH 137/359] parity-runtime: bump to v0.1.2 (#405) * parity-runtime: bump to v0.2.0 * Change release version to 0.1.2 instead of 0.2.0 --- runtime/CHANGELOG.md | 4 ++++ runtime/Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/CHANGELOG.md b/runtime/CHANGELOG.md index e73fb6f28..7c02a7690 100644 --- a/runtime/CHANGELOG.md +++ b/runtime/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.1.2] - 2020-07-16 +### Changed +- Port runtime to tokio-compat. 
[#403](https://github.com/paritytech/parity-common/pull/403) + ## [0.1.1] - 2020-02-11 ### Changed - Moved to parity common repo, prepared for publishing. [#271](https://github.com/paritytech/parity-common/pull/271) diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 3a5867e8f..2ab8c8629 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-runtime" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] edition = "2018" @@ -18,4 +18,4 @@ tokio-compat = "0.1" tokio = { version = "0.2", features = ["full"] } [features] -test-helpers = [] \ No newline at end of file +test-helpers = [] From 83f5a9758b20fe1dcc69e2b840c24f60740f8437 Mon Sep 17 00:00:00 2001 From: Sascha Hanse Date: Mon, 27 Jul 2020 11:25:44 +0200 Subject: [PATCH 138/359] plain_hasher/uint: remove some unsafe code and don't assume endianness (#407) * patches * Apply suggestions from code review Co-authored-by: Andronik Ordian --- plain_hasher/src/lib.rs | 9 +++++---- uint/src/uint.rs | 19 ++++++++++++------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/plain_hasher/src/lib.rs b/plain_hasher/src/lib.rs index 4da4a508b..5d0ed87e4 100644 --- a/plain_hasher/src/lib.rs +++ b/plain_hasher/src/lib.rs @@ -30,17 +30,18 @@ impl Hasher for PlainHasher { fn write(&mut self, bytes: &[u8]) { debug_assert!(bytes.len() == 32); let mut bytes_ptr = bytes.as_ptr(); - let mut prefix_ptr = &mut self.prefix as *mut u64 as *mut u8; + let mut prefix_bytes = self.prefix.to_le_bytes(); unroll! 
{ - for _i in 0..8 { + for i in 0..8 { unsafe { - *prefix_ptr ^= (*bytes_ptr ^ *bytes_ptr.offset(8)) ^ (*bytes_ptr.offset(16) ^ *bytes_ptr.offset(24)); + prefix_bytes[i] ^= (*bytes_ptr ^ *bytes_ptr.offset(8)) ^ (*bytes_ptr.offset(16) ^ *bytes_ptr.offset(24)); bytes_ptr = bytes_ptr.offset(1); - prefix_ptr = prefix_ptr.offset(1); } } } + + self.prefix = u64::from_le_bytes(prefix_bytes); } } diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 25419a7b0..6f5aa8af1 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1115,13 +1115,15 @@ macro_rules! construct_uint { /// Converts from big endian representation bytes in memory. pub fn from_big_endian(slice: &[u8]) -> Self { + use $crate::byteorder::{ByteOrder, BigEndian}; assert!($n_words * 8 >= slice.len()); + let mut padded = [0u8; $n_words * 8]; + padded[$n_words * 8 - slice.len() .. $n_words * 8].copy_from_slice(&slice); + let mut ret = [0; $n_words]; - unsafe { - let ret_u8: &mut [u8; $n_words * 8] = $crate::core_::mem::transmute(&mut ret); - ret_u8[0..slice.len()].copy_from_slice(slice); - ret_u8[0..slice.len()].reverse(); + for i in 0..$n_words { + ret[$n_words - i - 1] = BigEndian::read_u64(&padded[8 * i..]); } $name(ret) @@ -1129,12 +1131,15 @@ macro_rules! construct_uint { /// Converts from little endian representation bytes in memory. 
pub fn from_little_endian(slice: &[u8]) -> Self { + use $crate::byteorder::{ByteOrder, LittleEndian}; assert!($n_words * 8 >= slice.len()); + let mut padded = [0u8; $n_words * 8]; + padded[0..slice.len()].copy_from_slice(&slice); + let mut ret = [0; $n_words]; - unsafe { - let ret_u8: &mut [u8; $n_words * 8] = $crate::core_::mem::transmute(&mut ret); - ret_u8[0..slice.len()].copy_from_slice(&slice); + for i in 0..$n_words { + ret[i] = LittleEndian::read_u64(&padded[8 * i..]); } $name(ret) From 4a7afbb38e75d3d8407464dff401e0d1617785d1 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 27 Jul 2020 16:51:06 +0200 Subject: [PATCH 139/359] fix benches in uint and plain_hasher (#408) --- plain_hasher/benches/bench.rs | 6 +++--- uint/benches/bigint.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/plain_hasher/benches/bench.rs b/plain_hasher/benches/bench.rs index e14d9d7d7..8e496d50d 100644 --- a/plain_hasher/benches/bench.rs +++ b/plain_hasher/benches/bench.rs @@ -9,14 +9,14 @@ use std::collections::hash_map::DefaultHasher; use std::hash::Hasher; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; use plain_hasher::PlainHasher; fn bench_write_hasher(c: &mut Criterion) { c.bench_function("write_plain_hasher", |b| { b.iter(|| { (0..100u8).fold(PlainHasher::default(), |mut old, new| { - let bb = [new; 32]; + let bb = black_box([new; 32]); old.write(&bb); old }); @@ -25,7 +25,7 @@ fn bench_write_hasher(c: &mut Criterion) { c.bench_function("write_default_hasher", |b| { b.iter(|| { (0..100u8).fold(DefaultHasher::default(), |mut old, new| { - let bb = [new; 32]; + let bb = black_box([new; 32]); old.write(&bb); old }); diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index f6a8b1318..c7767c7ae 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -637,8 +637,8 @@ fn from_fixed_array(c: &mut Criterion) { [255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, 0, 0, 0, 0]; c.bench_function("from_fixed_array", move |b| { b.iter(|| { - let _: U512 = black_box(ary512.into()); - let _: U256 = black_box(ary256.into()); + let _: U512 = black_box(black_box(ary512).into()); + let _: U256 = black_box(black_box(ary256).into()); }) }); } From c3d4d3d637c324025cd910ef954175007c964c46 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 27 Jul 2020 18:21:27 +0200 Subject: [PATCH 140/359] kvdb-rocksdb: set format_version to 5 (#395) * kvdb-rocksdb: set format_version to 5 * Update kvdb-rocksdb/src/lib.rs Co-authored-by: David Co-authored-by: David --- kvdb-rocksdb/src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 2be3841c1..8b08ebb4a 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -339,6 +339,9 @@ fn generate_read_options() -> ReadOptions { fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { let mut block_opts = BlockBasedOptions::default(); block_opts.set_block_size(config.compaction.block_size); + // See https://github.com/facebook/rocksdb/blob/a1523efcdf2f0e8133b9a9f6e170a0dad49f928f/include/rocksdb/table.h#L246-L271 for details on what the format versions are/do. 
+ block_opts.set_format_version(5); + block_opts.set_block_restart_interval(16); // Set cache size as recommended by // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size let cache_size = config.memory_budget() / 3; @@ -1111,7 +1114,7 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 // Don't fsync every store assert!(settings.contains("Options.use_fsync: 0")); - // We're using the old format - assert!(settings.contains("format_version: 2")); + // We're using the new format + assert!(settings.contains("format_version: 5")); } } From b6cf4364d73f52ac3fe909bafeeb3ea645acf757 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 28 Jul 2020 10:07:45 +0200 Subject: [PATCH 141/359] plain_hasher: remove std feature and unsafe code (#410) * plain_hasher: remove unsafe and std feature, bump version * uint: update changelog * Update plain_hasher/src/lib.rs Co-authored-by: Nikolay Volf Co-authored-by: Nikolay Volf --- plain_hasher/CHANGELOG.md | 4 ++++ plain_hasher/Cargo.toml | 4 +--- plain_hasher/src/lib.rs | 9 ++------- uint/CHANGELOG.md | 1 + 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/plain_hasher/CHANGELOG.md b/plain_hasher/CHANGELOG.md index c3f142cfd..09fa2352d 100644 --- a/plain_hasher/CHANGELOG.md +++ b/plain_hasher/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.3.0] - 2020-07-27 +- Add support for big-endian platforms. [#407](https://github.com/paritytech/parity-common/pull/407) +- Remove unsafe code and `std` feature. [#410](https://github.com/paritytech/parity-common/pull/410) + ## [0.2.3] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml index bb5a1668d..f0c51c7c5 100644 --- a/plain_hasher/Cargo.toml +++ b/plain_hasher/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "plain_hasher" description = "Hasher for 32-byte keys." -version = "0.2.3" +version = "0.3.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" keywords = ["hash", "hasher"] @@ -16,8 +16,6 @@ crunchy = { version = "0.2.2", default-features = false } criterion = "0.3.0" [features] -default = ["std"] -std = ["crunchy/std"] [[bench]] name = "bench" diff --git a/plain_hasher/src/lib.rs b/plain_hasher/src/lib.rs index 5d0ed87e4..6084b449f 100644 --- a/plain_hasher/src/lib.rs +++ b/plain_hasher/src/lib.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] use core::hash::Hasher; @@ -26,18 +26,13 @@ impl Hasher for PlainHasher { } #[inline] - #[allow(unused_assignments)] fn write(&mut self, bytes: &[u8]) { debug_assert!(bytes.len() == 32); - let mut bytes_ptr = bytes.as_ptr(); let mut prefix_bytes = self.prefix.to_le_bytes(); unroll! { for i in 0..8 { - unsafe { - prefix_bytes[i] ^= (*bytes_ptr ^ *bytes_ptr.offset(8)) ^ (*bytes_ptr.offset(16) ^ *bytes_ptr.offset(24)); - bytes_ptr = bytes_ptr.offset(1); - } + prefix_bytes[i] ^= (bytes[i] ^ bytes[i + 8]) ^ (bytes[i + 16] ^ bytes[i + 24]); } } diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 3445abef3..09ac8447c 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Added a manual impl of `Eq` and `Hash`. [#390](https://github.com/paritytech/parity-common/pull/390) +- Remove some unsafe code and add big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) ## [0.8.3] - 2020-04-27 - Added `arbitrary` feature. 
[#378](https://github.com/paritytech/parity-common/pull/378) From 760447f0e3791a0222ec89a4b9698f2d94a5d972 Mon Sep 17 00:00:00 2001 From: Peter Goodspeed-Niklaus Date: Tue, 28 Jul 2020 10:08:00 +0200 Subject: [PATCH 142/359] Add automatic methods to KeyValueDB which test for the existence of certain objects (#409) Motivation: We have a use case in which we care about a large object's existence, but don't care to load megabytes of data from disc. We have only a `dyn KeyValueDB` handle available; whatever we use has to be part of the trait. By adding automatic implementations for the new methods, we avoid breaking existing code which is no worse than the existing strategy. Part 2 of this fix will be tracking down the concrete type in use and implementing specializations of those methods, so that we actually do save work, but that's dependent on this PR being merged. --- kvdb/src/lib.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 7cacff666..f4d553583 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -139,6 +139,16 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { fn io_stats(&self, _kind: IoStatsKind) -> IoStats { IoStats::empty() } + + /// Check for the existence of a value by key. + fn has_key(&self, col: u32, key: &[u8]) -> io::Result { + self.get(col, key).map(|opt| opt.is_some()) + } + + /// Check for the existence of a value by prefix. + fn has_prefix(&self, col: u32, prefix: &[u8]) -> bool { + self.get_by_prefix(col, prefix).is_some() + } } /// For a given start prefix (inclusive), returns the correct end prefix (non-inclusive). 
From 1c731dbb04d80c6b8e218b0f1f7ec1ae1d238e9b Mon Sep 17 00:00:00 2001 From: meehow Date: Tue, 28 Jul 2020 22:29:46 +0200 Subject: [PATCH 143/359] dependency upgrades to remove block-cipher-trait (#415) https://rustsec.org/advisories/RUSTSEC-2020-0018 --- parity-crypto/Cargo.toml | 6 +++--- parity-crypto/src/aes.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 74f564d7f..4df7a4522 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -14,9 +14,9 @@ harness = false required-features = ["publickey"] [dependencies] -aes = "0.3.2" -aes-ctr = "0.3.0" -block-modes = "0.3.3" +aes = "0.4.0" +aes-ctr = "0.4.0" +block-modes = "0.5.0" digest = "0.8" ethereum-types = { version = "0.9.0", optional = true, path = "../ethereum-types" } hmac = "0.7" diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs index 120e4ef3f..3ba869958 100644 --- a/parity-crypto/src/aes.rs +++ b/parity-crypto/src/aes.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use aes::block_cipher_trait::generic_array::GenericArray; +use aes::block_cipher::generic_array::GenericArray; use aes::{Aes128, Aes256}; use aes_ctr::stream_cipher::{NewStreamCipher, SyncStreamCipher}; use block_modes::{ From d3a2f9c1adb0742b3dc8d71568e6ba1f14eadd6c Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 3 Aug 2020 12:23:10 +0200 Subject: [PATCH 144/359] uint: Add checked_pow (#417) * uint: Add checked_pow Similar to how 025a0c1ea added `checked_{add,sub,mul,div,rem,neg}` add `checked_pow`. * uint/tests: Extend checked_pow tests - Port `pow` and `pow_overflow` tests. - Include overflow check in quickcheck suite. 
* CHANGELOG: Add entry * uint/tests: Add U256::MAX to the power of 2 test --- uint/CHANGELOG.md | 3 ++- uint/src/uint.rs | 8 ++++++++ uint/tests/uint_tests.rs | 12 ++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 09ac8447c..f2a023b29 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,7 +6,8 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Added a manual impl of `Eq` and `Hash`. [#390](https://github.com/paritytech/parity-common/pull/390) -- Remove some unsafe code and add big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) +- Removed some unsafe code and add big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) +- Added `checked_pow`. [#417](https://github.com/paritytech/parity-common/pull/417) ## [0.8.3] - 2020-04-27 - Added `arbitrary` feature. [#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 6f5aa8af1..9b50d6c7c 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -899,6 +899,14 @@ macro_rules! construct_uint { (res, overflow) } + /// Checked exponentiation. Returns `None` if overflow occurred. + pub fn checked_pow(self, expon: $name) -> Option<$name> { + match self.overflowing_pow(expon) { + (_, true) => None, + (val, _) => Some(val), + } + } + /// Add with overflow. 
#[inline(always)] pub fn overflowing_add(self, other: $name) -> ($name, bool) { diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index f01051586..973300449 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -56,6 +56,14 @@ fn uint256_checked_ops() { let a = U256::from(10); let b = !U256::from(1); + assert_eq!(U256::from(10).checked_pow(U256::from(0)), Some(U256::from(1))); + assert_eq!(U256::from(10).checked_pow(U256::from(1)), Some(U256::from(10))); + assert_eq!(U256::from(10).checked_pow(U256::from(2)), Some(U256::from(100))); + assert_eq!(U256::from(10).checked_pow(U256::from(3)), Some(U256::from(1000))); + assert_eq!(U256::from(10).checked_pow(U256::from(20)), Some(U256::exp10(20))); + assert_eq!(U256::from(2).checked_pow(U256::from(0x100)), None); + assert_eq!(U256::max_value().checked_pow(U256::from(2)), None); + assert_eq!(a.checked_add(b), None); assert_eq!(a.checked_add(a), Some(20.into())); @@ -1185,6 +1193,10 @@ pub mod laws { quickcheck! { fn pow_mul(x: $uint_ty) -> TestResult { if x.overflowing_pow($uint_ty::from(2)).1 || x.overflowing_pow($uint_ty::from(3)).1 { + // On overflow `checked_pow` should return `None`. + assert_eq!(x.checked_pow($uint_ty::from(2)), None); + assert_eq!(x.checked_pow($uint_ty::from(3)), None); + return TestResult::discard(); } From 1880a8ba6c4a0389e4401e2f461679add1eb12b4 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 3 Aug 2020 13:37:52 +0200 Subject: [PATCH 145/359] uint: bump minor version (#418) --- uint/CHANGELOG.md | 4 +++- uint/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index f2a023b29..97c0a091a 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,8 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.4] - 2020-08-03 - Added a manual impl of `Eq` and `Hash`. 
[#390](https://github.com/paritytech/parity-common/pull/390) -- Removed some unsafe code and add big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) +- Removed some unsafe code and added big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) - Added `checked_pow`. [#417](https://github.com/paritytech/parity-common/pull/417) ## [0.8.3] - 2020-04-27 diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 274b50692..690582bb3 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.8.3" +version = "0.8.4" authors = ["Parity Technologies "] readme = "README.md" edition = "2018" From 0431acb4f34751af44c664b0b0a6f36b0cd147b3 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 4 Aug 2020 15:14:39 +0200 Subject: [PATCH 146/359] travis: pin geckodriver in wasm-pack tests (#419) * travis: pin geckodriver and enable chrome tests * travis: use same workaround for linux * travis: disable chrome --- .travis.yml | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index ef5f3a01a..5950b71dc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,20 +21,17 @@ matrix: - os: osx osx_image: xcode11.3 addons: - chrome: stable firefox: latest - install: - - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | sh - - source ~/.nvm/nvm.sh - - nvm install --lts - - npm install -g chromedriver - - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - - which chromedriver rust: stable allow_failures: - rust: nightly install: + - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | sh + - source ~/.nvm/nvm.sh + - nvm install --lts + - npm install -g geckodriver@1.19.1 # https://github.com/rustwasm/wasm-bindgen/issues/2261 - curl 
https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + - which geckodriver script: - if [ "$TRAVIS_OS_NAME" == "linux" ]; then cargo fmt -- --check; From c36aeadc2f80955b8920cc1da59a962b1f35eec5 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 25 Aug 2020 12:22:08 +0200 Subject: [PATCH 147/359] uint: make const matching work again (#421) * uint: revert #390 partially * uint: add a regression test * uint: version bump * tomorrow, maybe? --- uint/CHANGELOG.md | 3 +++ uint/Cargo.toml | 2 +- uint/src/uint.rs | 25 ++++--------------------- uint/tests/uint_tests.rs | 10 ++++++++++ 4 files changed, 18 insertions(+), 22 deletions(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 97c0a091a..b856d52fc 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.8.5] - 2020-08-12 +- Make const matching work again. [#421](https://github.com/paritytech/parity-common/pull/421) + ## [0.8.4] - 2020-08-03 - Added a manual impl of `Eq` and `Hash`. [#390](https://github.com/paritytech/parity-common/pull/390) - Removed some unsafe code and added big-endian support. [#407](https://github.com/paritytech/parity-common/pull/407) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 690582bb3..f88d9b38b 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.8.4" +version = "0.8.5" authors = ["Parity Technologies "] readme = "README.md" edition = "2018" diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 9b50d6c7c..3715afbe5 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -135,7 +135,7 @@ macro_rules! uint_overflowing_binop { } ($name(ret), carry > 0) - }}; + }}; } #[macro_export] @@ -244,7 +244,7 @@ macro_rules! 
panic_on_overflow { ($name: expr) => { if $name { panic!("arithmetic operation overflow") - } + } }; } @@ -445,7 +445,7 @@ macro_rules! construct_uint { /// Little-endian large integer type #[repr(C)] $(#[$attr])* - #[derive(Copy, Clone)] + #[derive(Copy, Clone, Eq, PartialEq, Hash)] $visibility struct $name (pub [u64; $n_words]); /// Get a reference to the underlying little-endian words. @@ -1475,26 +1475,9 @@ macro_rules! construct_uint { } } - // We implement `Eq` and `Hash` manually to workaround - // https://github.com/rust-lang/rust/issues/61415 - impl $crate::core_::cmp::PartialEq for $name { - fn eq(&self, other: &$name) -> bool { - self.as_ref() == other.as_ref() - } - } - - impl $crate::core_::cmp::Eq for $name {} - - impl $crate::core_::hash::Hash for $name { - fn hash(&self, state: &mut H) { - // use the impl as slice &[u64] - self.as_ref().hash(state); - } - } - impl $crate::core_::cmp::Ord for $name { fn cmp(&self, other: &$name) -> $crate::core_::cmp::Ordering { - self.as_ref().iter().rev().cmp(other.as_ref().iter().rev()) + self.as_ref().iter().rev().cmp(other.as_ref().iter().rev()) } } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 973300449..578353e78 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -40,6 +40,16 @@ fn hash_impl_is_the_same_as_for_a_slice() { assert_eq!(uint_hash, slice_hash); } +// https://github.com/paritytech/parity-common/issues/420 +#[test] +fn const_matching_works() { + const ONE: U256 = U256([1, 0, 0, 0]); + match U256::zero() { + ONE => unreachable!(), + _ => {} + } +} + #[test] fn u128_conversions() { let mut a = U256::from(u128::max_value()); From bc97844dbced3c2e818f3810ebe7eb0d4cc05f24 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 25 Aug 2020 12:22:55 +0200 Subject: [PATCH 148/359] uint/src/uint.rs: Fix doc comment typo (#423) --- uint/src/uint.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 
3715afbe5..fc6f93dc4 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -597,7 +597,7 @@ macro_rules! construct_uint { r } - /// Returns the number of leading zeros in the binary representation of self. + /// Returns the number of trailing zeros in the binary representation of self. pub fn trailing_zeros(&self) -> u32 { let mut r = 0; for i in 0..$n_words { From 8137204f82266799fa441b6670576001b440617e Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 26 Aug 2020 12:05:55 +0200 Subject: [PATCH 149/359] kvdb-rocksdb: bump rocksdb, version, update changelog (#424) * kvdb-rocksdb: bump rocksdb, version, update changelog * Update kvdb-rocksdb/CHANGELOG.md --- kvdb-rocksdb/CHANGELOG.md | 4 ++++ kvdb-rocksdb/Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 5f157347f..acf6282fb 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.1] - 2020-08-26 +- Updated rocksdb to 0.15. [#424](https://github.com/paritytech/parity-common/pull/424) +- Set `format_version` to 5. [#395](https://github.com/paritytech/parity-common/pull/395) + ## [0.9.0] - 2020-06-24 - Updated `kvdb` to 0.7. 
[#402](https://github.com/paritytech/parity-common/pull/402) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index da466c469..e352ba0b2 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.9.0" +version = "0.9.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -19,7 +19,7 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.10.0" regex = "1.3.1" -rocksdb = { version = "0.14", features = ["snappy"], default-features = false } +rocksdb = { version = "0.15", features = ["snappy"], default-features = false } owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false, features = ["std", "smallvec"] } From adc85bf161a14f171bf1713bf66a09d311212f9c Mon Sep 17 00:00:00 2001 From: Valentin Date: Wed, 26 Aug 2020 14:55:01 +0200 Subject: [PATCH 150/359] Fix panic in FromStr implementation (#425) * Remove duplicate tests * Fix panic in FromStr implementation The current implementation panics if the hex is too strong in `from_big_endian`. --- uint/src/uint.rs | 4 ++ uint/tests/uint_tests.rs | 81 ++-------------------------------------- 2 files changed, 7 insertions(+), 78 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index fc6f93dc4..a926129ce 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1573,6 +1573,10 @@ macro_rules! 
impl_std_for_uint { false => ("0".to_owned() + value).from_hex()?, }; + if $n_words * 8 < bytes.len() { + return Err(Self::Err::InvalidHexLength); + } + let bytes_ref: &[u8] = &bytes; Ok(From::from(bytes_ref)) } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 578353e78..9319b40e1 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -136,90 +136,15 @@ fn uint256_from() { assert_eq!(e, sa); assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 
0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - 
U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); - assert_eq!( - U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() - ); - let sa = U256::from_str("0a").unwrap(); - assert_eq!(e, sa); - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_str("0100000000000012f0").unwrap()); assert_eq!( U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), U256::from_str("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").unwrap() ); + + // This string contains more bits than what fits in a U256. 
+ assert!(U256::from_str("000000000000000000000000000000000000000000000000000000000000000000").is_err()); } #[test] From 7ac4a9431a27c02793bd0de70d2986904977a34d Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 26 Aug 2020 17:43:40 +0200 Subject: [PATCH 151/359] kvdb-rocksdb: do not use deprecated cache option (#426) --- kvdb-rocksdb/src/lib.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 8b08ebb4a..d4da1da57 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -336,7 +336,7 @@ fn generate_read_options() -> ReadOptions { } /// Generate the block based options for RocksDB, based on the given `DatabaseConfig`. -fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { +fn generate_block_based_options(config: &DatabaseConfig) -> io::Result { let mut block_opts = BlockBasedOptions::default(); block_opts.set_block_size(config.compaction.block_size); // See https://github.com/facebook/rocksdb/blob/a1523efcdf2f0e8133b9a9f6e170a0dad49f928f/include/rocksdb/table.h#L246-L271 for details on what the format versions are/do. @@ -348,7 +348,8 @@ fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { if cache_size == 0 { block_opts.disable_cache() } else { - block_opts.set_lru_cache(cache_size); + let cache = rocksdb::Cache::new_lru_cache(cache_size).map_err(other_io_err)?; + block_opts.set_block_cache(&cache); // "index and filter blocks will be stored in block cache, together with all other data blocks." 
// See: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks block_opts.set_cache_index_and_filter_blocks(true); @@ -357,7 +358,7 @@ fn generate_block_based_options(config: &DatabaseConfig) -> BlockBasedOptions { } block_opts.set_bloom_filter(10, true); - block_opts + Ok(block_opts) } impl Database { @@ -372,7 +373,7 @@ impl Database { assert!(config.columns > 0, "the number of columns must not be zero"); let opts = generate_options(config); - let block_opts = generate_block_based_options(config); + let block_opts = generate_block_based_options(config)?; // attempt database repair if it has been previously marked as corrupted let db_corrupted = Path::new(path).join(Database::CORRUPTION_FILE_NAME); From c6fb6444cb9031163a3a1d68d9bb17bcca0f1e2e Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Thu, 10 Sep 2020 21:40:46 +0300 Subject: [PATCH 152/359] rlp: impl Encodable for Box (#427) * rlp: impl Encodable for Box * Add Decodable impl * Add changelog entry * Add Box import for alloc --- rlp/CHANGELOG.md | 1 + rlp/src/impls.rs | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index cee20902d..ca1dd05e7 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Implement Encodable, Decodable for boxed types. [#427](https://github.com/paritytech/parity-common/pull/427) ## [0.4.5] - 2020-03-16 ### Dependencies diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index c4815019f..f9d2f25d9 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -7,7 +7,7 @@ // except according to those terms. 
#[cfg(not(feature = "std"))] -use alloc::{borrow::ToOwned, string::String, vec::Vec}; +use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; use core::iter::{empty, once}; use core::{mem, str}; @@ -33,6 +33,18 @@ pub fn decode_usize(bytes: &[u8]) -> Result { } } +impl Encodable for Box { + fn rlp_append(&self, s: &mut RlpStream) { + Encodable::rlp_append(&**self, s) + } +} + +impl Decodable for Box { + fn decode(rlp: &Rlp) -> Result { + T::decode(rlp).map(Box::new) + } +} + impl Encodable for bool { fn rlp_append(&self, s: &mut RlpStream) { s.encoder().encode_iter(once(if *self { 1u8 } else { 0 })); From 28144173846143aac858307cd60cb8dcc58e3f0b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 29 Sep 2020 16:15:55 +0200 Subject: [PATCH 153/359] bump rlp to 0.4.6 (#430) --- rlp/CHANGELOG.md | 2 ++ rlp/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index ca1dd05e7..0eff86727 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.4.6] - 2020-09-29 - Implement Encodable, Decodable for boxed types. 
[#427](https://github.com/paritytech/parity-common/pull/427) ## [0.4.5] - 2020-03-16 diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index cb8694c9e..1c69ab875 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.4.5" +version = "0.4.6" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" From 9d80e9ba773b87d8282d7456cf511eb5ff61b7b7 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Thu, 15 Oct 2020 22:04:39 +0300 Subject: [PATCH 154/359] rlp: u128 support (#431) * rlp: u128 support * add u128 decode test --- rlp/src/impls.rs | 2 ++ rlp/tests/tests.rs | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index f9d2f25d9..02663349e 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -170,10 +170,12 @@ macro_rules! impl_decodable_for_u { impl_encodable_for_u!(u16); impl_encodable_for_u!(u32); impl_encodable_for_u!(u64); +impl_encodable_for_u!(u128); impl_decodable_for_u!(u16); impl_decodable_for_u!(u32); impl_decodable_for_u!(u64); +impl_decodable_for_u!(u128); impl Encodable for usize { fn rlp_append(&self, s: &mut RlpStream) { diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index ac8e8d951..75257d6e3 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -179,6 +179,16 @@ fn encode_u64() { run_encode_tests(tests); } +#[test] +fn encode_u128() { + let tests = vec![ + ETestPair(0u128, vec![0x80u8]), + ETestPair(0x0100_0000_0000_0000, vec![0x88, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + ETestPair(0xFFFF_FFFF_FFFF_FFFF, vec![0x88, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + ]; + run_encode_tests(tests); +} + #[test] fn encode_u256() { let tests = vec![ @@ -334,6 +344,15 @@ fn decode_untrusted_u64() { run_decode_tests(tests); } +#[test] +fn decode_untrusted_u128() { + let tests = vec![ + DTestPair(0x0100_0000_0000_0000u128, 
vec![0x88, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + DTestPair(0xFFFF_FFFF_FFFF_FFFFu128, vec![0x88, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + ]; + run_decode_tests(tests); +} + #[test] fn decode_untrusted_u256() { let tests = vec![ From 0b190c65452866bad54a8ed187a89ccc403a95e1 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 23 Oct 2020 07:18:31 -0400 Subject: [PATCH 155/359] Implement `RlpIterator` for `ExactSizedIterator` (#433) --- rlp/src/rlpin.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 319723c06..5ffaa89b7 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -369,6 +369,12 @@ impl<'a, 'view> Iterator for RlpIterator<'a, 'view> { } } +impl<'a, 'view> ExactSizeIterator for RlpIterator<'a, 'view> { + fn len(&self) -> usize { + self.rlp.item_count().unwrap_or(0) + } +} + pub struct BasicDecoder<'a> { rlp: &'a [u8], } From fd1b1787462abce4e7c95528153683575ac7349b Mon Sep 17 00:00:00 2001 From: Valentin Date: Mon, 26 Oct 2020 10:48:44 +0100 Subject: [PATCH 156/359] fixed-hash: Remove allocation in FromStr (#435) --- fixed-hash/src/hash.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 972b750b4..a971b4136 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -588,14 +588,15 @@ macro_rules! 
impl_rustc_hex_for_fixed_hash { /// - When encountering invalid non hex-digits /// - Upon empty string input or invalid input length in general fn from_str(input: &str) -> $crate::core_::result::Result<$name, $crate::rustc_hex::FromHexError> { - #[cfg(not(feature = "std"))] - use $crate::alloc_::vec::Vec; - use $crate::rustc_hex::FromHex; - let bytes: Vec = input.from_hex()?; - if bytes.len() != Self::len_bytes() { - return Err($crate::rustc_hex::FromHexError::InvalidHexLength); + let mut iter = $crate::rustc_hex::FromHexIter::new(input); + let mut result = Self::zero(); + for byte in result.as_mut() { + *byte = iter.next().ok_or(Self::Err::InvalidHexLength)??; } - Ok($name::from_slice(&bytes)) + if iter.next().is_some() { + return Err(Self::Err::InvalidHexLength); + } + Ok(result) } } }; From a19cf1f517732f458c47d7a8c4ae3b6e19912311 Mon Sep 17 00:00:00 2001 From: Drew Stone Date: Tue, 27 Oct 2020 18:16:42 -0400 Subject: [PATCH 157/359] Add H128 type (#434) * Add H128 type * Update lib.rs --- primitive-types/src/lib.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index ad5c48045..4746343f0 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -43,6 +43,12 @@ construct_uint! { pub struct U512(8); } +construct_fixed_hash! { + /// Fixed-size uninterpreted hash type with 16 bytes (128 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] + pub struct H128(16); +} + construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 20 bytes (160 bits) size. 
#[cfg_attr(feature = "scale-info", derive(TypeInfo))] @@ -68,6 +74,7 @@ mod serde { impl_uint_serde!(U256, 4); impl_uint_serde!(U512, 8); + impl_fixed_hash_serde!(H128, 16); impl_fixed_hash_serde!(H160, 20); impl_fixed_hash_serde!(H256, 32); impl_fixed_hash_serde!(H512, 64); @@ -82,6 +89,7 @@ mod codec { impl_uint_codec!(U256, 4); impl_uint_codec!(U512, 8); + impl_fixed_hash_codec!(H128, 16); impl_fixed_hash_codec!(H160, 20); impl_fixed_hash_codec!(H256, 32); impl_fixed_hash_codec!(H512, 64); @@ -96,6 +104,7 @@ mod rlp { impl_uint_rlp!(U256, 4); impl_uint_rlp!(U512, 8); + impl_fixed_hash_rlp!(H128, 16); impl_fixed_hash_rlp!(H160, 20); impl_fixed_hash_rlp!(H256, 32); impl_fixed_hash_rlp!(H512, 64); From ac082b501416e0a7f09fedeb91fe1fe9167bb733 Mon Sep 17 00:00:00 2001 From: Benjamin Smith Date: Thu, 29 Oct 2020 13:44:59 +0100 Subject: [PATCH 158/359] Add conversions U256 <-> f64 (#436) * add conversions U256 <-> f64 * cargo fmt * implement to and from f64 lossy for U256 as suggested * use std library for powi * move conversion to own module as a feature * include feature test in TOML file as suggested --- primitive-types/Cargo.toml | 5 ++ primitive-types/src/fp_conversion.rs | 43 +++++++++++++++ primitive-types/src/lib.rs | 3 + primitive-types/tests/fp_conversion.rs | 76 ++++++++++++++++++++++++++ 4 files changed, 127 insertions(+) create mode 100644 primitive-types/src/fp_conversion.rs create mode 100644 primitive-types/tests/fp_conversion.rs diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 30ba14ec2..0a3f287db 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -25,7 +25,12 @@ serde_no_std = ["impl-serde"] codec = ["impl-codec"] rlp = ["impl-rlp"] arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] +fp-conversion = ["std"] [[test]] name = "scale_info" required-features = ["scale-info"] + +[[test]] +name = "fp_conversion" +required-features = ["fp-conversion"] diff --git a/primitive-types/src/fp_conversion.rs 
b/primitive-types/src/fp_conversion.rs new file mode 100644 index 000000000..cfc51d279 --- /dev/null +++ b/primitive-types/src/fp_conversion.rs @@ -0,0 +1,43 @@ +use super::U256; + +impl U256 { + /// Lossy saturating conversion from a `f64` to a `U256`. Like for floating point to + /// primitive integer type conversions, this truncates fractional parts. + /// + /// The conversion follows the same rules as converting `f64` to other + /// primitive integer types. Namely, the conversion of `value: f64` behaves as + /// follows: + /// - `NaN` => `0` + /// - `(-∞, 0]` => `0` + /// - `(0, u256::MAX]` => `value as u256` + /// - `(u256::MAX, +∞)` => `u256::MAX` + pub fn from_f64_lossy(value: f64) -> U256 { + if value >= 1.0 { + let bits = value.to_bits(); + // NOTE: Don't consider the sign or check that the subtraction will + // underflow since we already checked that the value is greater + // than 1.0. + let exponent = ((bits >> 52) & 0x7ff) - 1023; + let mantissa = (bits & 0x0f_ffff_ffff_ffff) | 0x10_0000_0000_0000; + if exponent <= 52 { + U256::from(mantissa >> (52 - exponent)) + } else if exponent >= 256 { + U256::MAX + } else { + U256::from(mantissa) << U256::from(exponent - 52) + } + } else { + 0.into() + } + } + + /// Lossy conversion of `U256` to `f64`. 
+ pub fn to_f64_lossy(self) -> f64 { + let (res, factor) = match self { + U256([_, _, 0, 0]) => (self, 1.0), + U256([_, _, _, 0]) => (self >> 64, 2.0f64.powi(64)), + U256([_, _, _, _]) => (self >> 128, 2.0f64.powi(128)), + }; + (res.low_u128() as f64) * factor + } +} diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index 4746343f0..fe1eb4ac7 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -14,6 +14,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "fp-conversion")] +mod fp_conversion; + use core::convert::TryFrom; use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; #[cfg(feature = "scale-info")] diff --git a/primitive-types/tests/fp_conversion.rs b/primitive-types/tests/fp_conversion.rs new file mode 100644 index 000000000..ef3112edb --- /dev/null +++ b/primitive-types/tests/fp_conversion.rs @@ -0,0 +1,76 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Testing to and from f64 lossy for U256 primitive type. 
+ +use primitive_types::U256; + +#[test] +#[allow(clippy::float_cmp)] +fn convert_u256_to_f64() { + assert_eq!(U256::from(0).to_f64_lossy(), 0.0); + assert_eq!(U256::from(42).to_f64_lossy(), 42.0); + assert_eq!(U256::from(1_000_000_000_000_000_000u128).to_f64_lossy(), 1_000_000_000_000_000_000.0,); +} + +#[test] +#[allow(clippy::excessive_precision, clippy::float_cmp, clippy::unreadable_literal)] +#[cfg(feature = "std")] +fn convert_u256_to_f64_precision_loss() { + assert_eq!(U256::from(u64::max_value()).to_f64_lossy(), u64::max_value() as f64,); + assert_eq!( + U256::MAX.to_f64_lossy(), + 115792089237316195423570985008687907853269984665640564039457584007913129639935.0, + ); + assert_eq!( + U256::MAX.to_f64_lossy(), + 115792089237316200000000000000000000000000000000000000000000000000000000000000.0, + ); +} + +#[test] +fn convert_f64_to_u256() { + assert_eq!(U256::from_f64_lossy(0.0), 0.into()); + assert_eq!(U256::from_f64_lossy(13.37), 13.into()); + assert_eq!(U256::from_f64_lossy(42.0), 42.into()); + assert_eq!(U256::from_f64_lossy(999.999), 999.into()); + assert_eq!(U256::from_f64_lossy(1_000_000_000_000_000_000.0), 1_000_000_000_000_000_000u128.into(),); +} + +#[test] +fn convert_f64_to_u256_large() { + let value = U256::from(1) << U256::from(255); + assert_eq!(U256::from_f64_lossy(format!("{}", value).parse::().expect("unexpected error parsing f64")), value); +} + +#[test] +#[allow(clippy::unreadable_literal)] +fn convert_f64_to_u256_overflow() { + assert_eq!( + U256::from_f64_lossy(115792089237316200000000000000000000000000000000000000000000000000000000000000.0), + U256::MAX, + ); + assert_eq!( + U256::from_f64_lossy(999999999999999999999999999999999999999999999999999999999999999999999999999999.0), + U256::MAX, + ); +} + +#[test] +fn convert_f64_to_u256_non_normal() { + assert_eq!(U256::from_f64_lossy(f64::EPSILON), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::from_bits(0)), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::NAN), 0.into()); + 
assert_eq!(U256::from_f64_lossy(f64::NEG_INFINITY), 0.into()); + assert_eq!(U256::from_f64_lossy(f64::INFINITY), U256::MAX); +} + +#[test] +fn f64_to_u256_truncation() { + assert_eq!(U256::from_f64_lossy(10.5), 10.into()); +} From 539a279a525ba7fdfce7abdeb8b04e1e8fd204e0 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Sat, 14 Nov 2020 01:51:46 +0300 Subject: [PATCH 159/359] Bump secp256k1 to 0.19, use global context (#438) * Bump secp256k1 to 0.19, use global context * fmt * Remove zero-sig infrastructure, fix tests * Update changelog --- parity-crypto/CHANGELOG.md | 1 + parity-crypto/Cargo.toml | 2 +- parity-crypto/src/publickey/ec_math_utils.rs | 3 +- parity-crypto/src/publickey/ecdh.rs | 2 +- .../src/publickey/ecdsa_signature.rs | 68 +++++-------------- parity-crypto/src/publickey/extended_keys.rs | 6 +- parity-crypto/src/publickey/keypair.rs | 4 +- .../src/publickey/keypair_generator.rs | 3 +- parity-crypto/src/publickey/mod.rs | 35 ++-------- 9 files changed, 38 insertions(+), 86 deletions(-) diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 893082e82..19e2174e4 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Bump `rust-secp256k1` to v0.19, remove infrastructure for handling zero signatures (breaking). [#438](https://github.com/paritytech/parity-common/pull/438) ## [0.6.2] - 2020-06-19 - Put `Secret` memory on heap. 
[#400](https://github.com/paritytech/parity-common/pull/400) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 4df7a4522..3c929998e 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -26,7 +26,7 @@ rand = "0.7.2" ripemd160 = "0.8.0" rustc-hex = { version = "2.1.0", default-features = false, optional = true } scrypt = { version = "0.2.0", default-features = false } -secp256k1 = { version = "0.17.2", optional = true, features = ["recovery", "rand-std"] } +secp256k1 = { version = "0.19", optional = true, features = ["global-context", "recovery", "rand-std"] } sha2 = "0.8.0" subtle = "2.2.1" tiny-keccak = { version = "2.0", features = ["keccak"] } diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs index d9dd7e2e4..af2e2bf96 100644 --- a/parity-crypto/src/publickey/ec_math_utils.rs +++ b/parity-crypto/src/publickey/ec_math_utils.rs @@ -8,11 +8,12 @@ //! Multiple primitives for work with public and secret keys and with secp256k1 curve points -use super::{Error, Public, Secret, SECP256K1}; +use super::{Error, Public, Secret}; use ethereum_types::{BigEndianHash as _, H256, U256}; use lazy_static::lazy_static; use secp256k1::constants::CURVE_ORDER as SECP256K1_CURVE_ORDER; use secp256k1::key; +use secp256k1::SECP256K1; /// Generation point array combined from X and Y coordinates /// Equivalent to uncompressed form, see https://tools.ietf.org/id/draft-jivsov-ecc-compact-05.html#rfc.section.3 diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs index f5890a207..a44eaabd3 100644 --- a/parity-crypto/src/publickey/ecdh.rs +++ b/parity-crypto/src/publickey/ecdh.rs @@ -21,7 +21,7 @@ pub fn agree(secret: &Secret, public: &Public) -> Result { let publ = key::PublicKey::from_slice(&pdata)?; let sec = key::SecretKey::from_slice(secret.as_bytes())?; - let shared = ecdh::SharedSecret::new_with_hash(&publ, &sec, |x, _| x.into())?; + let shared = 
ecdh::SharedSecret::new_with_hash(&publ, &sec, |x, _| x.into()); Secret::import_key(&shared[0..32]).map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) } diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs index 7968967a7..b7d924c6c 100644 --- a/parity-crypto/src/publickey/ecdsa_signature.rs +++ b/parity-crypto/src/publickey/ecdsa_signature.rs @@ -8,19 +8,21 @@ //! Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm -use super::{public_to_address, Address, Error, Message, Public, Secret, ZeroesAllowedMessage, SECP256K1}; +use super::{public_to_address, Address, Error, Message, Public, Secret}; use ethereum_types::{H256, H520}; use rustc_hex::{FromHex, ToHex}; -use secp256k1::key::{PublicKey, SecretKey}; use secp256k1::{ + key::{PublicKey, SecretKey}, recovery::{RecoverableSignature, RecoveryId}, - Error as SecpError, Message as SecpMessage, + Error as SecpError, Message as SecpMessage, SECP256K1, +}; +use std::{ + cmp::PartialEq, + fmt, + hash::{Hash, Hasher}, + ops::{Deref, DerefMut}, + str::FromStr, }; -use std::cmp::PartialEq; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; -use std::str::FromStr; /// Signature encoded as RSV components #[repr(C)] @@ -254,48 +256,14 @@ pub fn recover(signature: &Signature, message: &Message) -> Result Result { - let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let pubkey = &SECP256K1.recover(&message.into(), &rsig)?; - let serialized = pubkey.serialize_uncompressed(); - let mut public = Public::zero(); - public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - Ok(public) -} - #[cfg(test)] mod tests { - use super::super::{Generator, Message, Random, SECP256K1}; use super::{ - recover, recover_allowing_all_zero_message, sign, verify_address, verify_public, Secret, Signature, - 
ZeroesAllowedMessage, + super::{Generator, Message, Random}, + recover, sign, verify_address, verify_public, Signature, }; - use secp256k1::SecretKey; use std::str::FromStr; - // Copy of `sign()` that allows signing all-zero Messages. - // Note: this is for *tests* only. DO NOT USE UNLESS YOU NEED IT. - fn sign_zero_message(secret: &Secret) -> Signature { - let context = &SECP256K1; - let sec = SecretKey::from_slice(secret.as_ref()).unwrap(); - // force an all-zero message into a secp `Message` bypassing the validity check. - let zero_msg = ZeroesAllowedMessage(Message::zero()); - let s = context.sign_recoverable(&zero_msg.into(), &sec); - let (rec_id, data) = s.serialize_compact(); - let mut data_arr = [0; 65]; - - // no need to check if s is low, it always is - data_arr[0..64].copy_from_slice(&data[0..64]); - data_arr[64] = rec_id.to_i32() as u8; - Signature(data_arr) - } - #[test] fn vrs_conversion() { // given @@ -330,19 +298,19 @@ mod tests { } #[test] - fn sign_and_recover_public_fails_with_zeroed_messages() { + fn sign_and_recover_public_works_with_zeroed_messages() { let keypair = Random.generate(); - let signature = sign_zero_message(keypair.secret()); + let signature = sign(keypair.secret(), &Message::zero()).unwrap(); let zero_message = Message::zero(); - assert!(&recover(&signature, &zero_message).is_err()); + assert_eq!(keypair.public(), &recover(&signature, &zero_message).unwrap()); } #[test] fn recover_allowing_all_zero_message_can_recover_from_all_zero_messages() { let keypair = Random.generate(); - let signature = sign_zero_message(keypair.secret()); - let zero_message = ZeroesAllowedMessage(Message::zero()); - assert_eq!(keypair.public(), &recover_allowing_all_zero_message(&signature, zero_message).unwrap()) + let signature = sign(keypair.secret(), &Message::zero()).unwrap(); + let zero_message = Message::zero(); + assert_eq!(keypair.public(), &recover(&signature, &zero_message).unwrap()) } #[test] diff --git 
a/parity-crypto/src/publickey/extended_keys.rs b/parity-crypto/src/publickey/extended_keys.rs index d83aa6a37..adc4f3862 100644 --- a/parity-crypto/src/publickey/extended_keys.rs +++ b/parity-crypto/src/publickey/extended_keys.rs @@ -202,11 +202,13 @@ impl ExtendedKeyPair { // https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki mod derivation { use super::super::ec_math_utils::CURVE_ORDER; - use super::super::SECP256K1; use super::{Derivation, Label}; use crate::{hmac, Keccak256}; use ethereum_types::{BigEndianHash, H256, H512, U256, U512}; - use secp256k1::key::{PublicKey, SecretKey}; + use secp256k1::{ + key::{PublicKey, SecretKey}, + SECP256K1, + }; use std::convert::TryInto; #[derive(Debug)] diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs index 330316012..dbbf637dc 100644 --- a/parity-crypto/src/publickey/keypair.rs +++ b/parity-crypto/src/publickey/keypair.rs @@ -8,9 +8,9 @@ //! Key pair (public + secret) description. -use super::{Address, Error, Public, Secret, SECP256K1}; +use super::{Address, Error, Public, Secret}; use crate::Keccak256; -use secp256k1::key; +use secp256k1::{key, SECP256K1}; use std::fmt; /// Convert public key into the address diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs index 9dea21de6..6ebef0985 100644 --- a/parity-crypto/src/publickey/keypair_generator.rs +++ b/parity-crypto/src/publickey/keypair_generator.rs @@ -8,7 +8,8 @@ //! Random key pair generator. Relies on the secp256k1 C-library to generate random data. -use super::{Generator, KeyPair, SECP256K1}; +use super::{Generator, KeyPair}; +use secp256k1::SECP256K1; /// Randomly generates new keypair, instantiating the RNG each time. 
pub struct Random; diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs index ec62012f3..c7981515c 100644 --- a/parity-crypto/src/publickey/mod.rs +++ b/parity-crypto/src/publickey/mod.rs @@ -20,47 +20,26 @@ pub mod ecdh; pub mod ecies; pub mod error; -pub use self::ecdsa_signature::{ - recover, recover_allowing_all_zero_message, sign, verify_address, verify_public, Signature, +pub use self::{ + ecdsa_signature::{recover, sign, verify_address, verify_public, Signature}, + error::Error, + extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}, + keypair::{public_to_address, KeyPair}, + keypair_generator::Random, + secret_key::{Secret, ZeroizeSecretKey}, }; -pub use self::error::Error; -pub use self::extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; -pub use self::keypair::{public_to_address, KeyPair}; -pub use self::keypair_generator::Random; -pub use self::secret_key::{Secret, ZeroizeSecretKey}; use ethereum_types::H256; -use lazy_static::lazy_static; pub use ethereum_types::{Address, Public}; pub type Message = H256; -use secp256k1::ThirtyTwoByteHash; - -/// In ethereum we allow public key recovery from a signature + message pair -/// where the message is all-zeroes. This conflicts with the best practise of -/// not allowing such values and so in order to avoid breaking consensus we need -/// this to work around it. The `ZeroesAllowedType` wraps an `H256` that can be -/// converted to a `[u8; 32]` which in turn can be cast to a -/// `secp256k1::Message` by the `ThirtyTwoByteHash` and satisfy the API for -/// `recover()`. 
-pub struct ZeroesAllowedMessage(pub H256); -impl ThirtyTwoByteHash for ZeroesAllowedMessage { - fn into_32(self) -> [u8; 32] { - self.0.to_fixed_bytes() - } -} - /// The number -1 encoded as a secret key const MINUS_ONE_KEY: &'static [u8] = &[ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40, ]; -lazy_static! { - static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); -} - /// Generates new keypair. pub trait Generator { /// Should be called to generate new keypair. From e1b63617504fadc956bcbcba97fdaf604c756b47 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 24 Nov 2020 14:40:48 +0100 Subject: [PATCH 160/359] add dependabot config (#442) --- .github/dependabot.yml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..39fd4fa43 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + labels: ["A2-insubstantial", "M5-dependencies"] + schedule: + interval: "daily" From 822e039f0afe16de3372fcf9761f572d5e9d8b67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Nov 2020 14:17:00 +0000 Subject: [PATCH 161/359] build(deps): update console_log requirement from 0.1.2 to 0.2.0 (#443) Updates the requirements on [console_log](https://github.com/iamcodemaker/console_log) to permit the latest version. 
- [Release notes](https://github.com/iamcodemaker/console_log/releases) - [Commits](https://github.com/iamcodemaker/console_log/commits) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-web/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 09c26b7d8..c2f2155ec 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -39,7 +39,7 @@ features = [ ] [dev-dependencies] -console_log = "0.1.2" +console_log = "0.2.0" kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } wasm-bindgen-test = "0.3.4" wasm-bindgen-futures = "0.4.4" From 27bc11d53bdbb479917020aba17b35ef9498d4fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Nov 2020 14:17:53 +0000 Subject: [PATCH 162/359] build(deps): update cfg-if requirement from 0.1.10 to 1.0.0 (#444) Updates the requirements on [cfg-if](https://github.com/alexcrichton/cfg-if) to permit the latest version. 
- [Release notes](https://github.com/alexcrichton/cfg-if/releases) - [Commits](https://github.com/alexcrichton/cfg-if/compare/0.1.10...1.0.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 91128228a..a3df19fd6 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -15,7 +15,7 @@ links = "parity-util-mem-ban-duplicates" build = "build.rs" [dependencies] -cfg-if = "0.1.10" +cfg-if = "1.0.0" dlmalloc = { version = "0.1.3", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.5", optional = true } From 7719dc99819233b854a62892444950139c856470 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 24 Nov 2020 16:39:32 +0100 Subject: [PATCH 163/359] primitive-types 0.7.3 (#440) * primitive-types: update the changelog * primitive-types: bump versions --- primitive-types/CHANGELOG.md | 5 +++++ primitive-types/Cargo.toml | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 4d317d442..11242518f 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.3] - 2020-11-12 +- Added `scale_info` support. [#312](https://github.com/paritytech/parity-common/pull/312) +- Added `H128` type. [#434](https://github.com/paritytech/parity-common/pull/434) +- Added `fp-conversion` feature: `U256` <-> `f64`. [#436](https://github.com/paritytech/parity-common/pull/436) + ## [0.7.2] - 2020-05-05 - Added `serde_no_std` feature. 
[#385](https://github.com/paritytech/parity-common/pull/385) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 0a3f287db..243a4d31d 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.7.2" +version = "0.7.3" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -13,7 +13,7 @@ uint = { version = "0.8.3", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } -scale-info = { version = "0.2", features = ["derive"], default-features = false, optional = true } +scale-info = { version = "0.4", features = ["derive"], default-features = false, optional = true } [features] default = ["std"] From 773f4f8aad65cf45e50b5a50c94105d47de46be9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Nov 2020 09:29:52 +0000 Subject: [PATCH 164/359] build(deps): update hex-literal requirement from 0.2.1 to 0.3.1 (#449) Updates the requirements on [hex-literal](https://github.com/RustCrypto/utils) to permit the latest version. 
- [Release notes](https://github.com/RustCrypto/utils/releases) - [Commits](https://github.com/RustCrypto/utils/compare/hex-literal-v0.2.1...hex-literal-v0.3.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ethbloom/Cargo.toml | 2 +- parity-crypto/Cargo.toml | 2 +- rlp/Cargo.toml | 2 +- triehash/Cargo.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index b89f51335..f99e61a6c 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -20,7 +20,7 @@ impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", defau [dev-dependencies] criterion = "0.3.0" rand = "0.7.2" -hex-literal = "0.2.1" +hex-literal = "0.3.1" [features] default = ["std", "serialize", "rustc-hex"] diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 3c929998e..4e23dafda 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -34,7 +34,7 @@ zeroize = { version = "1.0.0", default-features = false } [dev-dependencies] criterion = "0.3.0" -hex-literal = "0.2.1" +hex-literal = "0.3.1" [features] default = [] diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 1c69ab875..6e482a81b 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -12,7 +12,7 @@ rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] criterion = "0.3.0" -hex-literal = "0.2.1" +hex-literal = "0.3.1" primitive-types = { path = "../primitive-types", version = "0.7", features = ["impl-rlp"] } [features] diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 358161a80..e50537e76 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -17,7 +17,7 @@ keccak-hasher = "0.15.2" ethereum-types = { version = "0.9.0", path = "../ethereum-types" } tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" -hex-literal = "0.2.1" +hex-literal = "0.3.1" [features] default = ["std"] From 
14403e867651e5c17ba8e1842a00ba9fef73bb0c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Nov 2020 09:31:18 +0000 Subject: [PATCH 165/359] build(deps): update lru requirement from 0.5 to 0.6 (#448) Updates the requirements on [lru](https://github.com/jeromefroe/lru-rs) to permit the latest version. - [Release notes](https://github.com/jeromefroe/lru-rs/releases) - [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/jeromefroe/lru-rs/compare/0.5.0...0.6.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index a3df19fd6..78ef9d4cf 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -18,7 +18,7 @@ build = "build.rs" cfg-if = "1.0.0" dlmalloc = { version = "0.1.3", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } -lru = { version = "0.5", optional = true } +lru = { version = "0.6", optional = true } hashbrown = { version = "0.8", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } From d6f3ccb4b88748f181ec93e329ec53ed49463e15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Nov 2020 10:42:23 +0100 Subject: [PATCH 166/359] build(deps): update sysinfo requirement from 0.11.7 to 0.15.3 (#447) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. 
- [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index e352ba0b2..92b2c4d58 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -31,6 +31,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } rand = "0.7.2" tempdir = "0.3.7" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.11.7" +sysinfo = "0.15.3" ctrlc = "3.1.4" time = "0.1" From 356b74e995ff3c759ea4cf22c1b5acc67137a935 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Nov 2020 12:38:42 +0100 Subject: [PATCH 167/359] build(deps): update num-bigint requirement from 0.2.3 to 0.3.1 (#451) Updates the requirements on [num-bigint](https://github.com/rust-num/num-bigint) to permit the latest version. 
- [Release notes](https://github.com/rust-num/num-bigint/releases) - [Changelog](https://github.com/rust-num/num-bigint/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-bigint/compare/num-bigint-0.2.3...num-bigint-0.3.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- uint/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index f88d9b38b..05c208ed4 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -32,7 +32,7 @@ required-features = ["std"] [dev-dependencies] criterion = "0.3.0" -num-bigint = "0.2.3" +num-bigint = "0.3.1" [target.'cfg(unix)'.dev-dependencies] rug = { version = "1.6.0", default-features = false, features = ["integer"] } From e07e7db9dd6cd1148f3d9f2e3af3e9e9c7587d87 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Nov 2020 12:42:41 +0100 Subject: [PATCH 168/359] build(deps): update dlmalloc requirement from 0.1.3 to 0.2.1 (#452) * build(deps): update dlmalloc requirement from 0.1.3 to 0.2.1 Updates the requirements on [dlmalloc](https://github.com/alexcrichton/dlmalloc-rs) to permit the latest version. - [Release notes](https://github.com/alexcrichton/dlmalloc-rs/releases) - [Commits](https://github.com/alexcrichton/dlmalloc-rs/compare/0.1.3...0.2.1) Signed-off-by: dependabot[bot] * update to changelog (breaking change) * hmm, actually no Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andronik Ordian --- parity-util-mem/CHANGELOG.md | 1 + parity-util-mem/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index f6ff26a44..c1ba5fe12 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) ## [0.7.0] - 2020-06-24 - Added `const_size` to `MallocSizeOf` to optimize it for flat collections. [#398](https://github.com/paritytech/parity-common/pull/398) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 78ef9d4cf..35d90535a 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -16,7 +16,7 @@ build = "build.rs" [dependencies] cfg-if = "1.0.0" -dlmalloc = { version = "0.1.3", features = ["global"], optional = true } +dlmalloc = { version = "0.2.1", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.6", optional = true } hashbrown = { version = "0.8", optional = true } From fce73151ecd05ef77aade202e2602e6719cc0e97 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 30 Nov 2020 09:18:19 +0000 Subject: [PATCH 169/359] Fix typo (#457) Closes https://github.com/paritytech/parity-common/issues/454 --- uint/src/uint.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index a926129ce..84cc8d9a5 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -681,7 +681,7 @@ macro_rules! 
construct_uint { fn full_shl(self, shift: u32) -> [u64; $n_words + 1] { debug_assert!(shift < Self::WORD_BITS as u32); - let mut u = [064; $n_words + 1]; + let mut u = [0u64; $n_words + 1]; let u_lo = self.0[0] << shift; let u_hi = self >> (Self::WORD_BITS as u32 - shift); u[0] = u_lo; From f9850988973858f7870af4fd2335a54554441eb7 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Mon, 30 Nov 2020 14:22:05 +0300 Subject: [PATCH 170/359] rlp: Use BytesMut for RlpStream's backing buffer (#453) * Use BufMut::put_u8 * Remove RlpStream::drain * Switch internal repr to BytesMut * Remove deprecated RlpStream::complete_unbounded_list * only pull bytes/std when std is enabled * RlpStream::new_with_buffer * redirect RlpStream::{new,new_list} * fix test --- rlp/Cargo.toml | 3 ++- rlp/src/lib.rs | 19 ++++++++++-------- rlp/src/stream.rs | 50 +++++++++++++++++++++++----------------------- rlp/src/traits.rs | 9 ++++----- rlp/tests/tests.rs | 17 ++++++++++++++-- 5 files changed, 57 insertions(+), 41 deletions(-) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 6e482a81b..ebec7d305 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] +bytes = { version = "0.6", default-features = false } rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] @@ -17,7 +18,7 @@ primitive-types = { path = "../primitive-types", version = "0.7", features = ["i [features] default = ["std"] -std = ["rustc-hex/std"] +std = ["bytes/std", "rustc-hex/std"] [[bench]] name = "rlp" diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index 3a913d69f..135b3b75c 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -45,12 +45,15 @@ mod traits; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use bytes::BytesMut; use core::borrow::Borrow; -pub use self::error::DecoderError; -pub use self::rlpin::{PayloadInfo, Prototype, Rlp, RlpIterator}; -pub use self::stream::RlpStream; -pub use self::traits::{Decodable, 
Encodable}; +pub use self::{ + error::DecoderError, + rlpin::{PayloadInfo, Prototype, Rlp, RlpIterator}, + stream::RlpStream, + traits::{Decodable, Encodable}, +}; /// The RLP encoded empty data (used to mean "null value"). pub const NULL_RLP: [u8; 1] = [0x80; 1]; @@ -87,21 +90,21 @@ where /// let out = rlp::encode(&animal); /// assert_eq!(out, vec![0x83, b'c', b'a', b't']); /// ``` -pub fn encode(object: &E) -> Vec +pub fn encode(object: &E) -> BytesMut where E: Encodable, { let mut stream = RlpStream::new(); stream.append(object); - stream.drain() + stream.out() } -pub fn encode_list(object: &[K]) -> Vec +pub fn encode_list(object: &[K]) -> BytesMut where E: Encodable, K: Borrow, { let mut stream = RlpStream::new(); stream.append_list(object); - stream.drain() + stream.out() } diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 14983d5ef..d99d84c9e 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -8,6 +8,7 @@ #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use bytes::{BufMut, BytesMut}; use core::borrow::Borrow; use crate::traits::Encodable; @@ -28,7 +29,7 @@ impl ListInfo { /// Appendable rlp encoder. pub struct RlpStream { unfinished_lists: Vec, - buffer: Vec, + buffer: BytesMut, finished_list: bool, } @@ -41,12 +42,22 @@ impl Default for RlpStream { impl RlpStream { /// Initializes instance of empty `Stream`. pub fn new() -> Self { - RlpStream { unfinished_lists: Vec::with_capacity(16), buffer: Vec::with_capacity(1024), finished_list: false } + Self::new_with_buffer(BytesMut::with_capacity(1024)) } /// Initializes the `Stream` as a list. pub fn new_list(len: usize) -> Self { - let mut stream = RlpStream::new(); + Self::new_list_with_buffer(BytesMut::with_capacity(1024), len) + } + + /// Initializes instance of empty `Stream`. + pub fn new_with_buffer(buffer: BytesMut) -> Self { + RlpStream { unfinished_lists: Vec::with_capacity(16), buffer, finished_list: false } + } + + /// Initializes the `Stream` as a list. 
+ pub fn new_list_with_buffer(buffer: BytesMut, len: usize) -> Self { + let mut stream = RlpStream::new_with_buffer(buffer); stream.begin_list(len); stream } @@ -62,7 +73,7 @@ impl RlpStream { /// ``` pub fn append_empty_data(&mut self) -> &mut Self { // self push raw item - self.buffer.push(0x80); + self.buffer.put_u8(0x80); // try to finish and prepend the length self.note_appended(1); @@ -71,11 +82,6 @@ impl RlpStream { self } - /// Drain the object and return the underlying ElasticArray. Panics if it is not finished. - pub fn drain(self) -> Vec { - self.out() - } - /// Appends raw (pre-serialised) RLP data. Use with caution. Chainable. pub fn append_raw(&mut self, bytes: &[u8], item_count: usize) -> &mut Self { // push raw items @@ -168,14 +174,14 @@ impl RlpStream { match len { 0 => { // we may finish, if the appended list len is equal 0 - self.buffer.push(0xc0u8); + self.buffer.put_u8(0xc0u8); self.note_appended(1); self.finished_list = true; } _ => { // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data - self.buffer.push(0); + self.buffer.put_u8(0); let position = self.buffer.len(); self.unfinished_lists.push(ListInfo::new(position, Some(len))); @@ -191,7 +197,7 @@ impl RlpStream { self.finished_list = false; // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data - self.buffer.push(0); + self.buffer.put_u8(0); let position = self.buffer.len(); self.unfinished_lists.push(ListInfo::new(position, None)); // return chainable self @@ -275,7 +281,7 @@ impl RlpStream { /// Streams out encoded bytes. /// /// panic! if stream is not finished. - pub fn out(self) -> Vec { + pub fn out(self) -> BytesMut { if self.is_finished() { self.buffer } else { @@ -325,16 +331,10 @@ impl RlpStream { self.note_appended(1); self.finished_list = true; } - - /// Finalize current unbounded list. 
Panics if no unbounded list has been opened. - #[deprecated(since = "0.4.3", note = "use finalize_unbounded_list instead")] - pub fn complete_unbounded_list(&mut self) { - self.finalize_unbounded_list(); - } } pub struct BasicEncoder<'a> { - buffer: &'a mut Vec, + buffer: &'a mut BytesMut, } impl<'a> BasicEncoder<'a> { @@ -387,22 +387,22 @@ impl<'a> BasicEncoder<'a> { }; match len { // just 0 - 0 => self.buffer.push(0x80u8), + 0 => self.buffer.put_u8(0x80u8), len @ 1..=55 => { let first = value.next().expect("iterator length is higher than 1"); if len == 1 && first < 0x80 { // byte is its own encoding if < 0x80 - self.buffer.push(first); + self.buffer.put_u8(first); } else { // (prefix + length), followed by the string - self.buffer.push(0x80u8 + len as u8); - self.buffer.push(first); + self.buffer.put_u8(0x80u8 + len as u8); + self.buffer.put_u8(first); self.buffer.extend(value); } } // (prefix + length of length), followed by the length, followd by the string len => { - self.buffer.push(0); + self.buffer.put_u8(0); let position = self.buffer.len(); let inserted_bytes = self.insert_size(len, position); self.buffer[position - 1] = 0xb7 + inserted_bytes; diff --git a/rlp/src/traits.rs b/rlp/src/traits.rs index b5dfa2764..e96cf4c46 100644 --- a/rlp/src/traits.rs +++ b/rlp/src/traits.rs @@ -9,10 +9,9 @@ //! 
Common RLP traits #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use bytes::BytesMut; -use crate::error::DecoderError; -use crate::rlpin::Rlp; -use crate::stream::RlpStream; +use crate::{error::DecoderError, rlpin::Rlp, stream::RlpStream}; /// RLP decodable trait pub trait Decodable: Sized { @@ -26,9 +25,9 @@ pub trait Encodable { fn rlp_append(&self, s: &mut RlpStream); /// Get rlp-encoded bytes for this instance - fn rlp_bytes(&self) -> Vec { + fn rlp_bytes(&self) -> BytesMut { let mut s = RlpStream::new(); self.rlp_append(&mut s); - s.drain() + s.out() } } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 75257d6e3..77d514b76 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -8,6 +8,7 @@ use core::{cmp, fmt}; +use bytes::BytesMut; use hex_literal::hex; use primitive_types::{H160, U256}; use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; @@ -228,6 +229,18 @@ fn encode_str() { run_encode_tests(tests); } +#[test] +fn encode_into_existing_buffer() { + let mut buffer = BytesMut::new(); + buffer.extend_from_slice(b"junk"); + + let mut s = RlpStream::new_with_buffer(buffer.split_off(buffer.len())); + s.append(&"cat"); + buffer.unsplit(s.out()); + + assert_eq!(&buffer[..], &[b'j', b'u', b'n', b'k', 0x83, b'c', b'a', b't']); +} + #[test] fn encode_address() { let tests = vec![ETestPair( @@ -480,7 +493,7 @@ fn test_rlp_nested_empty_list_encode() { let mut stream = RlpStream::new_list(2); stream.append_list(&(Vec::new() as Vec)); stream.append(&40u32); - assert_eq!(stream.drain()[..], [0xc2u8, 0xc0u8, 40u8][..]); + assert_eq!(stream.out()[..], [0xc2u8, 0xc0u8, 40u8][..]); } #[test] @@ -497,7 +510,7 @@ fn test_rlp_stream_size_limit() { let item = [0u8; 1]; let mut stream = RlpStream::new(); while stream.append_raw_checked(&item, 1, limit) {} - assert_eq!(stream.drain().len(), limit); + assert_eq!(stream.out().len(), limit); } } From 38627a664080d21dd61c8daca998649ab532f5a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Nov 2020 13:16:29 +0100 Subject: [PATCH 171/359] build(deps): update hashbrown requirement from 0.8 to 0.9 (#456) Updates the requirements on [hashbrown](https://github.com/rust-lang/hashbrown) to permit the latest version. - [Release notes](https://github.com/rust-lang/hashbrown/releases) - [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/hashbrown/compare/v0.8.0...v0.9.1) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 35d90535a..0baba5692 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -19,7 +19,7 @@ cfg-if = "1.0.0" dlmalloc = { version = "0.2.1", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.6", optional = true } -hashbrown = { version = "0.8", optional = true } +hashbrown = { version = "0.9", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } From 903c06ec88bca4111e4fa30d0b37f54715a0a85e Mon Sep 17 00:00:00 2001 From: David Date: Mon, 30 Nov 2020 12:25:24 +0000 Subject: [PATCH 172/359] The time crate after 0.2 is unergonomic to use; just use chrono (#458) The 0.2 series of `time` doesn't make it easy to "just" create a timestamp. The `chrono` crate uses `time` v0.1 and is, I believe, what users like us should be using. So let's just do that. 
--- kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/examples/memtest.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 92b2c4d58..63d033db6 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -33,4 +33,4 @@ tempdir = "0.3.7" keccak-hash = { path = "../keccak-hash" } sysinfo = "0.15.3" ctrlc = "3.1.4" -time = "0.1" +chrono = "0.4" diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 59fa1a137..3b44de4ee 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -136,7 +136,7 @@ fn main() { keyvalues = KeyValueSeed::with_seed(seed); if step % 10000 == 9999 { - let timestamp = time::strftime("%Y-%m-%d %H:%M:%S", &time::now()).expect("Error formatting log timestamp"); + let timestamp = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"); println!("{}", timestamp); println!("\tData written: {} keys - {} Mb", step + 1, ((step + 1) * 64 * 128) / 1024 / 1024); From 6bdc113de7a755d6faf04c58fe7d708953a313e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Dec 2020 12:48:07 +0100 Subject: [PATCH 173/359] build(deps): update send_wrapper requirement from 0.3.0 to 0.5.0 (#461) Updates the requirements on [send_wrapper](https://github.com/thk1/send_wrapper) to permit the latest version. 
- [Release notes](https://github.com/thk1/send_wrapper/releases) - [Changelog](https://github.com/thk1/send_wrapper/blob/master/CHANGELOG.md) - [Commits](https://github.com/thk1/send_wrapper/compare/v0.3.0...v0.5.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-web/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index c2f2155ec..50420598a 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -15,7 +15,7 @@ kvdb = { version = "0.7", path = "../kvdb" } kvdb-memorydb = { version = "0.7", path = "../kvdb-memorydb" } futures = "0.3" log = "0.4.8" -send_wrapper = "0.3.0" +send_wrapper = "0.5.0" parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false } [dependencies.web-sys] From 93ede8bbce00cfdfde45b152e43f867104f32ade Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Tue, 1 Dec 2020 14:48:19 +0300 Subject: [PATCH 174/359] rlp: add bytes impls (#459) * rlp: add bytes impls * add tests --- rlp/src/impls.rs | 25 +++++++++++++++++++++++++ rlp/tests/tests.rs | 46 +++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index 02663349e..7aa013925 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -8,6 +8,7 @@ #[cfg(not(feature = "std"))] use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; +use bytes::{Bytes, BytesMut}; use core::iter::{empty, once}; use core::{mem, str}; @@ -79,6 +80,30 @@ impl Decodable for Vec { } } +impl Encodable for Bytes { + fn rlp_append(&self, s: &mut RlpStream) { + s.encoder().encode_value(self); + } +} + +impl Decodable for Bytes { + fn decode(rlp: &Rlp) -> Result { + rlp.decoder().decode_value(|bytes| Ok(Bytes::copy_from_slice(bytes))) + } +} + +impl Encodable for BytesMut { + fn rlp_append(&self, s: &mut RlpStream) { + 
s.encoder().encode_value(self); + } +} + +impl Decodable for BytesMut { + fn decode(rlp: &Rlp) -> Result { + rlp.decoder().decode_value(|bytes| Ok(bytes.into())) + } +} + impl Encodable for Option where T: Encodable, diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 77d514b76..07651af83 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -8,7 +8,7 @@ use core::{cmp, fmt}; -use bytes::BytesMut; +use bytes::{Bytes, BytesMut}; use hex_literal::hex; use primitive_types::{H160, U256}; use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; @@ -265,6 +265,28 @@ fn encode_vector_u8() { run_encode_tests(tests); } +#[test] +fn encode_bytes() { + let tests = vec![ + ETestPair(Bytes::from_static(&[]), vec![0x80]), + ETestPair(Bytes::from_static(&[0u8]), vec![0]), + ETestPair(Bytes::from_static(&[0x15]), vec![0x15]), + ETestPair(Bytes::from_static(&[0x40, 0x00]), vec![0x82, 0x40, 0x00]), + ]; + run_encode_tests(tests); +} + +#[test] +fn encode_bytesmut() { + let tests = vec![ + ETestPair(BytesMut::from(&[] as &[u8]), vec![0x80]), + ETestPair(BytesMut::from(&[0_u8] as &[u8]), vec![0]), + ETestPair(BytesMut::from(&[0x15_u8] as &[u8]), vec![0x15]), + ETestPair(BytesMut::from(&[0x40_u8, 0x00_u8] as &[u8]), vec![0x82, 0x40, 0x00]), + ]; + run_encode_tests(tests); +} + #[test] fn encode_vector_u64() { let tests = vec![ @@ -327,6 +349,28 @@ fn decode_vector_u8() { run_decode_tests(tests); } +#[test] +fn decode_bytes() { + let tests = vec![ + DTestPair(Bytes::from_static(&[]), vec![0x80]), + DTestPair(Bytes::from_static(&[0u8]), vec![0]), + DTestPair(Bytes::from_static(&[0x15]), vec![0x15]), + DTestPair(Bytes::from_static(&[0x40, 0x00]), vec![0x82, 0x40, 0x00]), + ]; + run_decode_tests(tests); +} + +#[test] +fn decode_bytesmut() { + let tests = vec![ + DTestPair(BytesMut::from(&[] as &[u8]), vec![0x80]), + DTestPair(BytesMut::from(&[0_u8] as &[u8]), vec![0]), + DTestPair(BytesMut::from(&[0x15_u8] as &[u8]), vec![0x15]), + DTestPair(BytesMut::from(&[0x40_u8, 
0x00_u8] as &[u8]), vec![0x82, 0x40, 0x00]), + ]; + run_decode_tests(tests); +} + #[test] fn decode_untrusted_u8() { let tests = vec![DTestPair(0x0u8, vec![0x80]), DTestPair(0x77u8, vec![0x77]), DTestPair(0xccu8, vec![0x81, 0xcc])]; From 2394a1104155f1693d384524f8b6ba3fc5cc55b5 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Fri, 4 Dec 2020 12:08:41 +0300 Subject: [PATCH 175/359] rlp: Fix buffer indexing (#462) * Fix buffer indexing * test for clear --- rlp/src/stream.rs | 42 ++++++++++++++++++++++++++---------------- rlp/tests/tests.rs | 29 +++++++++++++++++++++++++++-- 2 files changed, 53 insertions(+), 18 deletions(-) diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index d99d84c9e..607dc9344 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -29,6 +29,7 @@ impl ListInfo { /// Appendable rlp encoder. pub struct RlpStream { unfinished_lists: Vec, + start_pos: usize, buffer: BytesMut, finished_list: bool, } @@ -52,7 +53,7 @@ impl RlpStream { /// Initializes instance of empty `Stream`. pub fn new_with_buffer(buffer: BytesMut) -> Self { - RlpStream { unfinished_lists: Vec::with_capacity(16), buffer, finished_list: false } + RlpStream { unfinished_lists: Vec::with_capacity(16), start_pos: buffer.len(), buffer, finished_list: false } } /// Initializes the `Stream` as a list. @@ -62,6 +63,10 @@ impl RlpStream { stream } + fn total_written(&self) -> usize { + self.buffer.len() - self.start_pos + } + /// Apends null to the end of stream, chainable. 
/// /// ``` @@ -183,7 +188,7 @@ impl RlpStream { // by pushing always this 1 byte we may avoid unnecessary shift of data self.buffer.put_u8(0); - let position = self.buffer.len(); + let position = self.total_written(); self.unfinished_lists.push(ListInfo::new(position, Some(len))); } } @@ -198,7 +203,7 @@ impl RlpStream { // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data self.buffer.put_u8(0); - let position = self.buffer.len(); + let position = self.total_written(); self.unfinished_lists.push(ListInfo::new(position, None)); // return chainable self self @@ -215,7 +220,7 @@ impl RlpStream { /// Calculate total RLP size for appended payload. pub fn estimate_size(&self, add: usize) -> usize { - let total_size = self.buffer.len() + add; + let total_size = self.total_written() + add; let mut base_size = total_size; for list in &self.unfinished_lists[..] { let len = total_size - list.position; @@ -250,7 +255,7 @@ impl RlpStream { /// ``` pub fn clear(&mut self) { // clear bytes - self.buffer.clear(); + self.buffer.truncate(self.start_pos); // clear lists self.unfinished_lists.clear(); @@ -309,7 +314,7 @@ impl RlpStream { }; if should_finish { let x = self.unfinished_lists.pop().unwrap(); - let len = self.buffer.len() - x.position; + let len = self.total_written() - x.position; self.encoder().insert_list_payload(len, x.position); self.note_appended(1); } @@ -317,7 +322,7 @@ impl RlpStream { } pub fn encoder(&mut self) -> BasicEncoder { - BasicEncoder::new(self) + BasicEncoder::new(self, self.start_pos) } /// Finalize current unbounded list. Panics if no unbounded list has been opened. 
@@ -326,7 +331,7 @@ impl RlpStream { if list.max.is_some() { panic!("List type mismatch."); } - let len = self.buffer.len() - list.position; + let len = self.total_written() - list.position; self.encoder().insert_list_payload(len, list.position); self.note_appended(1); self.finished_list = true; @@ -335,11 +340,16 @@ impl RlpStream { pub struct BasicEncoder<'a> { buffer: &'a mut BytesMut, + start_pos: usize, } impl<'a> BasicEncoder<'a> { - fn new(stream: &'a mut RlpStream) -> Self { - BasicEncoder { buffer: &mut stream.buffer } + fn new(stream: &'a mut RlpStream, start_pos: usize) -> Self { + BasicEncoder { buffer: &mut stream.buffer, start_pos } + } + + fn total_written(&self) -> usize { + self.buffer.len() - self.start_pos } fn insert_size(&mut self, size: usize, position: usize) -> u8 { @@ -347,10 +357,10 @@ impl<'a> BasicEncoder<'a> { let leading_empty_bytes = size.leading_zeros() as usize / 8; let size_bytes = 4 - leading_empty_bytes as u8; let buffer: [u8; 4] = size.to_be_bytes(); - assert!(position <= self.buffer.len()); + assert!(position <= self.total_written()); self.buffer.extend_from_slice(&buffer[leading_empty_bytes..]); - self.buffer[position..].rotate_right(size_bytes as usize); + self.buffer[self.start_pos + position..].rotate_right(size_bytes as usize); size_bytes as u8 } @@ -359,11 +369,11 @@ impl<'a> BasicEncoder<'a> { // 1 byte was already reserved for payload earlier match len { 0..=55 => { - self.buffer[pos - 1] = 0xc0u8 + len as u8; + self.buffer[self.start_pos + pos - 1] = 0xc0u8 + len as u8; } _ => { let inserted_bytes = self.insert_size(len, pos); - self.buffer[pos - 1] = 0xf7u8 + inserted_bytes; + self.buffer[self.start_pos + pos - 1] = 0xf7u8 + inserted_bytes; } }; } @@ -403,9 +413,9 @@ impl<'a> BasicEncoder<'a> { // (prefix + length of length), followed by the length, followd by the string len => { self.buffer.put_u8(0); - let position = self.buffer.len(); + let position = self.total_written(); let inserted_bytes = self.insert_size(len, 
position); - self.buffer[position - 1] = 0xb7 + inserted_bytes; + self.buffer[self.start_pos + position - 1] = 0xb7 + inserted_bytes; self.buffer.extend(value); } } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 07651af83..979dba8d1 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -234,11 +234,23 @@ fn encode_into_existing_buffer() { let mut buffer = BytesMut::new(); buffer.extend_from_slice(b"junk"); - let mut s = RlpStream::new_with_buffer(buffer.split_off(buffer.len())); + let mut split_buffer = buffer.split_off(buffer.len()); + split_buffer.extend_from_slice(b"!"); + + let mut s = RlpStream::new_with_buffer(split_buffer); s.append(&"cat"); buffer.unsplit(s.out()); - assert_eq!(&buffer[..], &[b'j', b'u', b'n', b'k', 0x83, b'c', b'a', b't']); + buffer.extend_from_slice(b" and "); + + let mut s = RlpStream::new_with_buffer(buffer); + s.append(&"dog"); + let buffer = s.out(); + + assert_eq!( + &buffer[..], + &[b'j', b'u', b'n', b'k', b'!', 0x83, b'c', b'a', b't', b' ', b'a', b'n', b'd', b' ', 0x83, b'd', b'o', b'g'] + ); } #[test] @@ -307,6 +319,19 @@ fn encode_vector_str() { run_encode_tests_list(tests); } +#[test] +fn clear() { + let mut buffer = BytesMut::new(); + buffer.extend_from_slice(b"junk"); + + let mut s = RlpStream::new_with_buffer(buffer); + s.append(&"parrot"); + s.clear(); + s.append(&"cat"); + + assert_eq!(&s.out()[..], &[b'j', b'u', b'n', b'k', 0x83, b'c', b'a', b't']); +} + struct DTestPair(T, Vec) where T: Decodable + fmt::Debug + cmp::Eq; From 1a7fc68f93e518f5987f6a1f4c9c54eb526a3fbd Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Fri, 4 Dec 2020 15:06:17 +0300 Subject: [PATCH 176/359] rlp: store test bytes in hex literals (#465) --- rlp/tests/tests.rs | 250 ++++++++++++++++++++++++--------------------- 1 file changed, 135 insertions(+), 115 deletions(-) diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 979dba8d1..9ba2e3592 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -22,7 +22,7 @@ fn 
test_rlp_display() { #[test] fn length_overflow() { - let bs = [0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe5]; + let bs = hex!("bfffffffffffffffffffffffe5"); let rlp = Rlp::new(&bs); let res: Result = rlp.as_val(); assert_eq!(Err(DecoderError::RlpInvalidLength), res); @@ -150,12 +150,32 @@ where } } +impl From<(T, Repr)> for ETestPair +where + T: Encodable, + Repr: Into>, +{ + fn from((v, repr): (T, Repr)) -> Self { + Self(v, repr.into()) + } +} + +impl From<(Vec, Repr)> for VETestPair +where + T: Encodable, + Repr: Into>, +{ + fn from((v, repr): (Vec, Repr)) -> Self { + Self(v, repr.into()) + } +} + #[test] fn encode_u16() { let tests = vec![ - ETestPair(0u16, vec![0x80u8]), - ETestPair(0x100, vec![0x82, 0x01, 0x00]), - ETestPair(0xffff, vec![0x82, 0xff, 0xff]), + ETestPair::from((0_u16, hex!("80"))), + ETestPair::from((0x100_u16, hex!("820100"))), + ETestPair::from((0xffff_u16, hex!("82ffff"))), ]; run_encode_tests(tests); } @@ -163,9 +183,9 @@ fn encode_u16() { #[test] fn encode_u32() { let tests = vec![ - ETestPair(0u32, vec![0x80u8]), - ETestPair(0x0001_0000, vec![0x83, 0x01, 0x00, 0x00]), - ETestPair(0x00ff_ffff, vec![0x83, 0xff, 0xff, 0xff]), + ETestPair::from((0_u32, hex!("80"))), + ETestPair::from((0x0001_0000_u32, hex!("83010000"))), + ETestPair::from((0x00ff_ffff_u32, hex!("83ffffff"))), ]; run_encode_tests(tests); } @@ -173,9 +193,9 @@ fn encode_u32() { #[test] fn encode_u64() { let tests = vec![ - ETestPair(0u64, vec![0x80u8]), - ETestPair(0x0100_0000, vec![0x84, 0x01, 0x00, 0x00, 0x00]), - ETestPair(0xFFFF_FFFF, vec![0x84, 0xff, 0xff, 0xff, 0xff]), + ETestPair::from((0_u64, hex!("80"))), + ETestPair::from((0x0100_0000_u64, hex!("8401000000"))), + ETestPair::from((0xFFFF_FFFF_u64, hex!("84ffffffff"))), ]; run_encode_tests(tests); } @@ -183,9 +203,9 @@ fn encode_u64() { #[test] fn encode_u128() { let tests = vec![ - ETestPair(0u128, vec![0x80u8]), - ETestPair(0x0100_0000_0000_0000, vec![0x88, 0x01, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00]), - ETestPair(0xFFFF_FFFF_FFFF_FFFF, vec![0x88, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + ETestPair::from((0_u128, hex!("80"))), + ETestPair::from((0x0100_0000_0000_0000_u128, hex!("880100000000000000"))), + ETestPair::from((0xFFFF_FFFF_FFFF_FFFF_u128, hex!("88ffffffffffffffff"))), ]; run_encode_tests(tests); } @@ -193,18 +213,13 @@ fn encode_u128() { #[test] fn encode_u256() { let tests = vec![ - ETestPair(U256::from(0u64), vec![0x80u8]), - ETestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), - ETestPair(U256::from(0xffff_ffffu64), vec![0x84, 0xff, 0xff, 0xff, 0xff]), - ETestPair( - ("8090a0b0c0d0e0f00910203040506077000000000000\ - 000100000000000012f0") - .into(), - vec![ - 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, - ], - ), + ETestPair::from((U256::from(0_u64), hex!("80"))), + ETestPair::from((U256::from(0x0100_0000_u64), hex!("8401000000"))), + ETestPair::from((U256::from(0xffff_ffff_u64), hex!("84ffffffff"))), + ETestPair::from(( + hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").into(), + hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), + )), ]; run_encode_tests(tests); } @@ -212,11 +227,11 @@ fn encode_u256() { #[test] fn encode_str() { let tests = vec![ - ETestPair("cat", vec![0x83, b'c', b'a', b't']), - ETestPair("dog", vec![0x83, b'd', b'o', b'g']), - ETestPair("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k']), - ETestPair("", vec![0x80]), - ETestPair( + ETestPair::from(("cat", vec![0x83, b'c', b'a', b't'])), + ETestPair::from(("dog", vec![0x83, b'd', b'o', b'g'])), + ETestPair::from(("Marek", vec![0x85, b'M', b'a', b'r', b'e', b'k'])), + ETestPair::from(("", hex!("80"))), + ETestPair::from(( "Lorem ipsum dolor sit amet, consectetur adipisicing elit", vec![ 0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', 
b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', @@ -224,7 +239,7 @@ fn encode_str() { b'e', b'c', b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ', b'e', b'l', b'i', b't', ], - ), + )), ]; run_encode_tests(tests); } @@ -255,13 +270,10 @@ fn encode_into_existing_buffer() { #[test] fn encode_address() { - let tests = vec![ETestPair( + let tests = vec![ETestPair::from(( H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), - vec![ - 0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, 0xb3, - 0x7d, 0x11, 0x06, - ], - )]; + hex!("94ef2d6d194084c2de36e0dabfce45d046b37d1106"), + ))]; run_encode_tests(tests); } @@ -269,10 +281,10 @@ fn encode_address() { #[test] fn encode_vector_u8() { let tests = vec![ - ETestPair(vec![], vec![0x80]), - ETestPair(vec![0u8], vec![0]), - ETestPair(vec![0x15], vec![0x15]), - ETestPair(vec![0x40, 0x00], vec![0x82, 0x40, 0x00]), + ETestPair::from((vec![], hex!("80"))), + ETestPair::from((vec![0u8], hex!("00"))), + ETestPair::from((vec![0x15], hex!("15"))), + ETestPair::from((vec![0x40, 0x00], hex!("824000"))), ]; run_encode_tests(tests); } @@ -280,10 +292,10 @@ fn encode_vector_u8() { #[test] fn encode_bytes() { let tests = vec![ - ETestPair(Bytes::from_static(&[]), vec![0x80]), - ETestPair(Bytes::from_static(&[0u8]), vec![0]), - ETestPair(Bytes::from_static(&[0x15]), vec![0x15]), - ETestPair(Bytes::from_static(&[0x40, 0x00]), vec![0x82, 0x40, 0x00]), + ETestPair::from((Bytes::from_static(&hex!("")), hex!("80"))), + ETestPair::from((Bytes::from_static(&hex!("00")), hex!("00"))), + ETestPair::from((Bytes::from_static(&hex!("15")), hex!("15"))), + ETestPair::from((Bytes::from_static(&hex!("4000")), hex!("824000"))), ]; run_encode_tests(tests); } @@ -291,10 +303,10 @@ fn encode_bytes() { #[test] fn encode_bytesmut() { let tests = vec![ - ETestPair(BytesMut::from(&[] as &[u8]), vec![0x80]), - 
ETestPair(BytesMut::from(&[0_u8] as &[u8]), vec![0]), - ETestPair(BytesMut::from(&[0x15_u8] as &[u8]), vec![0x15]), - ETestPair(BytesMut::from(&[0x40_u8, 0x00_u8] as &[u8]), vec![0x82, 0x40, 0x00]), + ETestPair::from((BytesMut::from(&[] as &[u8]), hex!("80"))), + ETestPair::from((BytesMut::from(&hex!("00") as &[u8]), hex!("00"))), + ETestPair::from((BytesMut::from(&hex!("15") as &[u8]), hex!("15"))), + ETestPair::from((BytesMut::from(&hex!("4000") as &[u8]), hex!("824000"))), ]; run_encode_tests(tests); } @@ -302,13 +314,10 @@ fn encode_bytesmut() { #[test] fn encode_vector_u64() { let tests = vec![ - VETestPair(vec![], vec![0xc0]), - VETestPair(vec![15u64], vec![0xc1, 0x0f]), - VETestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]), - VETestPair( - vec![0xffff_ffff, 1, 2, 3, 7, 0xff], - vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff], - ), + VETestPair::from((vec![], hex!("c0"))), + VETestPair::from((vec![15_u64], hex!("c10f"))), + VETestPair::from((vec![1, 2, 3, 7, 0xff], hex!("c60102030781ff"))), + VETestPair::from((vec![0xffff_ffff, 1, 2, 3, 7, 0xff], hex!("cb84ffffffff0102030781ff"))), ]; run_encode_tests_list(tests); } @@ -362,14 +371,34 @@ where } } +impl From<(T, Repr)> for DTestPair +where + T: Decodable + fmt::Debug + cmp::Eq, + Repr: Into>, +{ + fn from((v, repr): (T, Repr)) -> Self { + Self(v, repr.into()) + } +} + +impl From<(Vec, Repr)> for VDTestPair +where + T: Decodable + fmt::Debug + cmp::Eq, + Repr: Into>, +{ + fn from((v, repr): (Vec, Repr)) -> Self { + Self(v, repr.into()) + } +} + /// Vec (Bytes) is treated as a single value #[test] fn decode_vector_u8() { let tests = vec![ - DTestPair(vec![], vec![0x80]), - DTestPair(vec![0u8], vec![0]), - DTestPair(vec![0x15], vec![0x15]), - DTestPair(vec![0x40, 0x00], vec![0x82, 0x40, 0x00]), + DTestPair::from((vec![], hex!("80"))), + DTestPair::from((vec![0_u8], hex!("00"))), + DTestPair::from((vec![0x15], hex!("15"))), + DTestPair::from((vec![0x40, 0x00], hex!("824000"))), 
]; run_decode_tests(tests); } @@ -377,10 +406,10 @@ fn decode_vector_u8() { #[test] fn decode_bytes() { let tests = vec![ - DTestPair(Bytes::from_static(&[]), vec![0x80]), - DTestPair(Bytes::from_static(&[0u8]), vec![0]), - DTestPair(Bytes::from_static(&[0x15]), vec![0x15]), - DTestPair(Bytes::from_static(&[0x40, 0x00]), vec![0x82, 0x40, 0x00]), + DTestPair::from((Bytes::from_static(&hex!("")), hex!("80"))), + DTestPair::from((Bytes::from_static(&hex!("00")), hex!("00"))), + DTestPair::from((Bytes::from_static(&hex!("15")), hex!("15"))), + DTestPair::from((Bytes::from_static(&hex!("4000")), hex!("824000"))), ]; run_decode_tests(tests); } @@ -388,40 +417,42 @@ fn decode_bytes() { #[test] fn decode_bytesmut() { let tests = vec![ - DTestPair(BytesMut::from(&[] as &[u8]), vec![0x80]), - DTestPair(BytesMut::from(&[0_u8] as &[u8]), vec![0]), - DTestPair(BytesMut::from(&[0x15_u8] as &[u8]), vec![0x15]), - DTestPair(BytesMut::from(&[0x40_u8, 0x00_u8] as &[u8]), vec![0x82, 0x40, 0x00]), + DTestPair::from((BytesMut::from(&hex!("") as &[u8]), hex!("80"))), + DTestPair::from((BytesMut::from(&hex!("00") as &[u8]), hex!("00"))), + DTestPair::from((BytesMut::from(&hex!("15") as &[u8]), hex!("15"))), + DTestPair::from((BytesMut::from(&hex!("4000") as &[u8]), hex!("824000"))), ]; run_decode_tests(tests); } #[test] fn decode_untrusted_u8() { - let tests = vec![DTestPair(0x0u8, vec![0x80]), DTestPair(0x77u8, vec![0x77]), DTestPair(0xccu8, vec![0x81, 0xcc])]; + let tests = vec![ + DTestPair::from((0x0_u8, hex!("80"))), + DTestPair::from((0x77_u8, hex!("77"))), + DTestPair::from((0xcc_u8, hex!("81cc"))), + ]; run_decode_tests(tests); } #[test] fn decode_untrusted_u16() { - let tests = vec![DTestPair(0x100u16, vec![0x82, 0x01, 0x00]), DTestPair(0xffffu16, vec![0x82, 0xff, 0xff])]; + let tests = vec![DTestPair::from((0x100u16, hex!("820100"))), DTestPair::from((0xffffu16, hex!("82ffff")))]; run_decode_tests(tests); } #[test] fn decode_untrusted_u32() { - let tests = vec![ - 
DTestPair(0x0001_0000u32, vec![0x83, 0x01, 0x00, 0x00]), - DTestPair(0x00ff_ffffu32, vec![0x83, 0xff, 0xff, 0xff]), - ]; + let tests = + vec![DTestPair::from((0x0001_0000u32, hex!("83010000"))), DTestPair::from((0x00ff_ffffu32, hex!("83ffffff")))]; run_decode_tests(tests); } #[test] fn decode_untrusted_u64() { let tests = vec![ - DTestPair(0x0100_0000u64, vec![0x84, 0x01, 0x00, 0x00, 0x00]), - DTestPair(0xFFFF_FFFFu64, vec![0x84, 0xff, 0xff, 0xff, 0xff]), + DTestPair::from((0x0100_0000_u64, hex!("8401000000"))), + DTestPair::from((0xFFFF_FFFF_u64, hex!("84ffffffff"))), ]; run_decode_tests(tests); } @@ -429,8 +460,8 @@ fn decode_untrusted_u64() { #[test] fn decode_untrusted_u128() { let tests = vec![ - DTestPair(0x0100_0000_0000_0000u128, vec![0x88, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - DTestPair(0xFFFF_FFFF_FFFF_FFFFu128, vec![0x88, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + DTestPair::from((0x0100_0000_0000_0000_u128, hex!("880100000000000000"))), + DTestPair::from((0xFFFF_FFFF_FFFF_FFFF_u128, hex!("88ffffffffffffffff"))), ]; run_decode_tests(tests); } @@ -438,18 +469,13 @@ fn decode_untrusted_u128() { #[test] fn decode_untrusted_u256() { let tests = vec![ - DTestPair(U256::from(0u64), vec![0x80u8]), - DTestPair(U256::from(0x0100_0000u64), vec![0x84, 0x01, 0x00, 0x00, 0x00]), - DTestPair(U256::from(0xffff_ffffu64), vec![0x84, 0xff, 0xff, 0xff, 0xff]), - DTestPair( - ("8090a0b0c0d0e0f00910203040506077000000000000\ - 000100000000000012f0") - .into(), - vec![ - 0xa0, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xf0, - ], - ), + DTestPair::from((U256::from(0_u64), hex!("80"))), + DTestPair::from((U256::from(0x0100_0000_u64), hex!("8401000000"))), + DTestPair::from((U256::from(0xffff_ffff_u64), hex!("84ffffffff"))), + DTestPair::from(( + hex!(" 
8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").into(), + hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), + )), ]; run_decode_tests(tests); } @@ -457,11 +483,11 @@ fn decode_untrusted_u256() { #[test] fn decode_untrusted_str() { let tests = vec![ - DTestPair("cat".to_owned(), vec![0x83, b'c', b'a', b't']), - DTestPair("dog".to_owned(), vec![0x83, b'd', b'o', b'g']), - DTestPair("Marek".to_owned(), vec![0x85, b'M', b'a', b'r', b'e', b'k']), - DTestPair("".to_owned(), vec![0x80]), - DTestPair( + DTestPair::from(("cat".to_owned(), vec![0x83, b'c', b'a', b't'])), + DTestPair::from(("dog".to_owned(), vec![0x83, b'd', b'o', b'g'])), + DTestPair::from(("Marek".to_owned(), vec![0x85, b'M', b'a', b'r', b'e', b'k'])), + DTestPair::from(("".to_owned(), hex!("80"))), + DTestPair::from(( "Lorem ipsum dolor sit amet, consectetur adipisicing elit".to_owned(), vec![ 0xb8, 0x38, b'L', b'o', b'r', b'e', b'm', b' ', b'i', b'p', b's', b'u', b'm', b' ', b'd', b'o', b'l', @@ -469,33 +495,27 @@ fn decode_untrusted_str() { b'e', b'c', b't', b'e', b't', b'u', b'r', b' ', b'a', b'd', b'i', b'p', b'i', b's', b'i', b'c', b'i', b'n', b'g', b' ', b'e', b'l', b'i', b't', ], - ), + )), ]; run_decode_tests(tests); } #[test] fn decode_untrusted_address() { - let tests = vec![DTestPair( + let tests = vec![DTestPair::from(( H160::from(hex!("ef2d6d194084c2de36e0dabfce45d046b37d1106")), - vec![ - 0x94, 0xef, 0x2d, 0x6d, 0x19, 0x40, 0x84, 0xc2, 0xde, 0x36, 0xe0, 0xda, 0xbf, 0xce, 0x45, 0xd0, 0x46, 0xb3, - 0x7d, 0x11, 0x06, - ], - )]; + hex!("94ef2d6d194084c2de36e0dabfce45d046b37d1106"), + ))]; run_decode_tests(tests); } #[test] fn decode_untrusted_vector_u64() { let tests = vec![ - VDTestPair(vec![], vec![0xc0]), - VDTestPair(vec![15u64], vec![0xc1, 0x0f]), - VDTestPair(vec![1, 2, 3, 7, 0xff], vec![0xc6, 1, 2, 3, 7, 0x81, 0xff]), - VDTestPair( - vec![0xffff_ffff, 1, 2, 3, 7, 0xff], - vec![0xcb, 0x84, 0xff, 0xff, 0xff, 0xff, 1, 2, 3, 7, 0x81, 0xff], - ), + 
VDTestPair::from((vec![], hex!("c0"))), + VDTestPair::from((vec![15_u64], hex!("c10f"))), + VDTestPair::from((vec![1, 2, 3, 7, 0xff], hex!("c60102030781ff"))), + VDTestPair::from((vec![0xffff_ffff, 1, 2, 3, 7, 0xff], hex!("cb84ffffffff0102030781ff"))), ]; run_decode_tests_list(tests); } @@ -520,7 +540,7 @@ fn test_rlp_data_length_check() { #[test] fn test_rlp_long_data_length_check() { - let mut data: Vec = vec![0xb8, 255]; + let mut data = hex!("b8ff").to_vec(); for _ in 0..253 { data.push(b'c'); } @@ -533,7 +553,7 @@ fn test_rlp_long_data_length_check() { #[test] fn test_the_exact_long_string() { - let mut data: Vec = vec![0xb8, 255]; + let mut data = hex!("b8ff").to_vec(); for _ in 0..255 { data.push(b'c'); } @@ -546,7 +566,7 @@ fn test_the_exact_long_string() { #[test] fn test_rlp_2bytes_data_length_check() { - let mut data: Vec = vec![0xb9, 2, 255]; // 512+255 + let mut data = hex!("b902ff").to_vec(); // 512+255 for _ in 0..700 { data.push(b'c'); } @@ -561,13 +581,13 @@ fn test_rlp_2bytes_data_length_check() { fn test_rlp_nested_empty_list_encode() { let mut stream = RlpStream::new_list(2); stream.append_list(&(Vec::new() as Vec)); - stream.append(&40u32); - assert_eq!(stream.out()[..], [0xc2u8, 0xc0u8, 40u8][..]); + stream.append(&0x28_u32); + assert_eq!(stream.out()[..], hex!("c2c028")[..]); } #[test] fn test_rlp_list_length_overflow() { - let data: Vec = vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00]; + let data = hex!("ffffffffffffffffff000000"); let rlp = Rlp::new(&data); let as_val: Result = rlp.val_at(0); assert_eq!(Err(DecoderError::RlpIsTooShort), as_val); From 85bcb113524d471e15d3211da3049d0fb9d30adb Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Mon, 7 Dec 2020 12:44:14 +0300 Subject: [PATCH 177/359] Make RLP optional in several crates (#466) --- ethbloom/Cargo.toml | 5 +++-- ethbloom/src/lib.rs | 2 ++ ethereum-types/Cargo.toml | 7 ++++--- ethereum-types/src/hash.rs | 6 ++++++ ethereum-types/src/uint.rs | 2 ++ 5 
files changed, 17 insertions(+), 5 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index f99e61a6c..247d4bebe 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -14,7 +14,7 @@ tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] @@ -23,11 +23,12 @@ rand = "0.7.2" hex-literal = "0.3.1" [features] -default = ["std", "serialize", "rustc-hex"] +default = ["std", "rlp", "serialize", "rustc-hex"] std = ["fixed-hash/std", "crunchy/std"] serialize = ["std", "impl-serde"] rustc-hex = ["fixed-hash/rustc-hex"] arbitrary = ["fixed-hash/arbitrary"] +rlp = ["impl-rlp"] codec = ["impl-codec"] [[bench]] diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 7b18ec9ee..aa8993aae 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -56,6 +56,7 @@ use crunchy::unroll; use fixed_hash::*; #[cfg(feature = "codec")] use impl_codec::impl_fixed_hash_codec; +#[cfg(feature = "rlp")] use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; @@ -69,6 +70,7 @@ construct_fixed_hash! { /// Bloom hash type with 256 bytes (2048 bits) size. 
pub struct Bloom(BLOOM_SIZE); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(Bloom, BLOOM_SIZE); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(Bloom, BLOOM_SIZE); diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 496a5f88a..e1384d116 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -11,17 +11,18 @@ edition = "2018" ethbloom = { path = "../ethbloom", version = "0.9", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.7", features = ["rlp", "byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.7", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] serde_json = "1.0.41" [features] -default = ["std", "serialize"] +default = ["std", "rlp", "serialize"] std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] +rlp = ["impl-rlp", "ethbloom/rlp"] codec = ["impl-codec", "ethbloom/codec"] diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 25f00557d..68f435756 100644 --- a/ethereum-types/src/hash.rs +++ 
b/ethereum-types/src/hash.rs @@ -10,6 +10,7 @@ use crate::{U128, U256, U512, U64}; use fixed_hash::*; #[cfg(feature = "codec")] use impl_codec::impl_fixed_hash_codec; +#[cfg(feature = "rlp")] use impl_rlp::impl_fixed_hash_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_fixed_hash_serde; @@ -22,6 +23,7 @@ pub trait BigEndianHash { } construct_fixed_hash! { pub struct H32(4); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H32, 4); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H32, 4); @@ -29,6 +31,7 @@ impl_fixed_hash_serde!(H32, 4); impl_fixed_hash_codec!(H32, 4); construct_fixed_hash! { pub struct H64(8); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H64, 8); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H64, 8); @@ -36,6 +39,7 @@ impl_fixed_hash_serde!(H64, 8); impl_fixed_hash_codec!(H64, 8); construct_fixed_hash! { pub struct H128(16); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H128, 16); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H128, 16); @@ -46,6 +50,7 @@ pub use primitive_types::H160; pub use primitive_types::H256; construct_fixed_hash! { pub struct H264(33); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H264, 33); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H264, 33); @@ -55,6 +60,7 @@ impl_fixed_hash_codec!(H264, 33); pub use primitive_types::H512; construct_fixed_hash! { pub struct H520(65); } +#[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H520, 65); #[cfg(feature = "serialize")] impl_fixed_hash_serde!(H520, 65); diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index 881e0ecd6..440221c16 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -8,6 +8,7 @@ #[cfg(feature = "codec")] use impl_codec::impl_uint_codec; +#[cfg(feature = "rlp")] use impl_rlp::impl_uint_rlp; #[cfg(feature = "serialize")] use impl_serde::impl_uint_serde; @@ -19,6 +20,7 @@ construct_uint! { /// Unsigned 64-bit integer. 
pub struct U64(1); } +#[cfg(feature = "rlp")] impl_uint_rlp!(U64, 1); #[cfg(feature = "serialize")] impl_uint_serde!(U64, 1); From 4978bf01aecf0ff32f77d60d5ae7c3bb0bbe0706 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 7 Dec 2020 13:55:40 +0100 Subject: [PATCH 178/359] bump versions and update changelogs (#463) --- contract-address/CHANGELOG.md | 4 +++- contract-address/Cargo.toml | 6 +++--- ethbloom/CHANGELOG.md | 2 ++ ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 2 ++ ethereum-types/Cargo.toml | 8 ++++---- keccak-hash/CHANGELOG.md | 2 ++ keccak-hash/Cargo.toml | 2 +- parity-crypto/CHANGELOG.md | 6 ++++-- parity-crypto/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 2 ++ parity-util-mem/Cargo.toml | 4 ++-- primitive-types/CHANGELOG.md | 1 + primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/rlp/CHANGELOG.md | 4 +++- primitive-types/impls/rlp/Cargo.toml | 4 ++-- rlp/CHANGELOG.md | 2 ++ rlp/Cargo.toml | 4 ++-- transaction-pool/Cargo.toml | 2 +- triehash/Cargo.toml | 4 ++-- 20 files changed, 44 insertions(+), 27 deletions(-) diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md index a9b2a3a0b..20714d132 100644 --- a/contract-address/CHANGELOG.md +++ b/contract-address/CHANGELOG.md @@ -1,10 +1,12 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `ethereum-types` to 0.10. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.9.0] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index b8a0b7993..6754f6d63 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "contract-address" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,8 +11,8 @@ edition = "2018" readme = "README.md" [dependencies] -ethereum-types = { version = "0.9.0", path = "../ethereum-types" } -rlp = { version = "0.4", path = "../rlp" } +ethereum-types = { version = "0.10.0", path = "../ethereum-types" } +rlp = { version = "0.5", path = "../rlp" } keccak-hash = { version = "0.5", path = "../keccak-hash", default-features = false } [features] diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 67480095f..773f5785e 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.9.2] - 2020-05-18 - Added `codec` feature. 
[#393](https://github.com/paritytech/parity-common/pull/393) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 247d4bebe..7d21efd5d 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.9.2" +version = "0.10.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -14,7 +14,7 @@ tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false, optional = true } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 058a2d34c..19795de50 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.9.2] - 2020-05-18 - Added `codec` feature. 
[#393](https://github.com/paritytech/parity-common/pull/393) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index e1384d116..663b8da9a 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.9.2" +version = "0.10.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,12 +8,12 @@ description = "Ethereum types" edition = "2018" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.9", default-features = false } +ethbloom = { path = "../ethbloom", version = "0.10", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.7", features = ["byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.8", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.2", default-features = false, optional = true } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index e8580f5d2..77435e8fd 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `primitive-types` to 0.8. 
[#463](https://github.com/paritytech/parity-common/pull/463) ## [0.5.1] - 2020-04-10 - Added `keccak256_range` and `keccak512_range` functions. [#370](https://github.com/paritytech/parity-common/pull/370) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index c663c66c5..9f1e4523b 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.7", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.8", default-features = false } [dev-dependencies] tempdir = "0.3.7" diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 19e2174e4..42a24d6a5 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,7 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- Bump `rust-secp256k1` to v0.19, remove infrastructure for handling zero signatures (breaking). [#438](https://github.com/paritytech/parity-common/pull/438) +### Breaking +- Bump `rust-secp256k1` to v0.19, always allow zero signatures. [#438](https://github.com/paritytech/parity-common/pull/438) +- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.6.2] - 2020-06-19 - Put `Secret` memory on heap. [#400](https://github.com/paritytech/parity-common/pull/400) @@ -14,7 +16,7 @@ The format is based on [Keep a Changelog]. ## [0.6.1] - 2020-04-11 - Add `recover_allowing_all_zero_message()` and `ZeroesAllowedMessage` to accomodate ethereum's `ecrecover` builtin. [#369](https://github.com/paritytech/parity-common/pull/369) - + ## [0.6.0] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Updated dependencies. 
[#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 4e23dafda..33ce92f6f 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.6.2" +version = "0.7.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." @@ -18,7 +18,7 @@ aes = "0.4.0" aes-ctr = "0.4.0" block-modes = "0.5.0" digest = "0.8" -ethereum-types = { version = "0.9.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } hmac = "0.7" lazy_static = { version = "1.0", optional = true } pbkdf2 = "0.3.0" diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index c1ba5fe12..b8dfe28f9 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,8 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) +### Breaking +- Updated `ethereum-types` to 0.10. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.7.0] - 2020-06-24 - Added `const_size` to `MallocSizeOf` to optimize it for flat collections. 
[#398](https://github.com/paritytech/parity-common/pull/398) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 0baba5692..f2ccbfb09 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -26,9 +26,9 @@ parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.1.3" smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.9.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.10.0", optional = true } -primitive-types = { version = "0.7", path = "../primitive-types", default-features = false, optional = true } +primitive-types = { version = "0.8", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 11242518f..d78c908d9 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Updated `impl-rlp` to `rlp` 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.7.3] - 2020-11-12 - Added `scale_info` support. 
[#312](https://github.com/paritytech/parity-common/pull/312) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 243a4d31d..d881bce56 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.7.3" +version = "0.8.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -12,7 +12,7 @@ fixed-hash = { version = "0.6", path = "../fixed-hash", default-features = false uint = { version = "0.8.3", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } -impl-rlp = { version = "0.2", path = "impls/rlp", default-features = false, optional = true } +impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } scale-info = { version = "0.4", features = ["derive"], default-features = false, optional = true } [features] diff --git a/primitive-types/impls/rlp/CHANGELOG.md b/primitive-types/impls/rlp/CHANGELOG.md index 927c9dc9c..30dab8dfb 100644 --- a/primitive-types/impls/rlp/CHANGELOG.md +++ b/primitive-types/impls/rlp/CHANGELOG.md @@ -1,7 +1,9 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `rlp` to 0.5. 
[#463](https://github.com/paritytech/parity-common/pull/463) diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index fbc12c7fb..622aa0f37 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-rlp" -version = "0.2.1" +version = "0.3.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,7 +8,7 @@ description = "RLP serialization support for uint and fixed hash." edition = "2018" [dependencies] -rlp = { version = "0.4", path = "../../../rlp", default-features = false } +rlp = { version = "0.5", path = "../../../rlp", default-features = false } [features] default = ["std"] diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 0eff86727..4f25e9b9e 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Use BytesMut for `RlpStream`'s backing buffer. [#453](https://github.com/paritytech/parity-common/pull/453) ## [0.4.6] - 2020-09-29 - Implement Encodable, Decodable for boxed types. 
[#427](https://github.com/paritytech/parity-common/pull/427) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index ebec7d305..9a06199eb 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.4.6" +version = "0.5.0" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" @@ -14,7 +14,7 @@ rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] criterion = "0.3.0" hex-literal = "0.3.1" -primitive-types = { path = "../primitive-types", version = "0.7", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.8", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index a13bc5767..a0de36b27 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -13,4 +13,4 @@ smallvec = "0.6.10" trace-time = { path = "../trace-time", version = "0.1" } [dev-dependencies] -ethereum-types = { version = "0.9.0", path = "../ethereum-types" } +ethereum-types = { version = "0.10.0", path = "../ethereum-types" } diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index e50537e76..acceca2f5 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -9,12 +9,12 @@ edition = "2018" [dependencies] hash-db = { version = "0.15.2", default-features = false } -rlp = { version = "0.4", path = "../rlp", default-features = false } +rlp = { version = "0.5", path = "../rlp", default-features = false } [dev-dependencies] criterion = "0.3.0" keccak-hasher = "0.15.2" -ethereum-types = { version = "0.9.0", path = "../ethereum-types" } +ethereum-types = { version = "0.10.0", path = "../ethereum-types" } tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" hex-literal = "0.3.1" From 64dd79e45dafba237a9fcc707c069beb6b73c3c4 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: 
Mon, 7 Dec 2020 15:04:36 +0100 Subject: [PATCH 179/359] update other dependencies (#471) --- kvdb-web/Cargo.toml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 50420598a..70080ae9a 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -9,17 +9,17 @@ license = "MIT OR Apache-2.0" edition = "2018" [dependencies] -wasm-bindgen = "0.2.54" -js-sys = "0.3.31" +wasm-bindgen = "0.2.69" +js-sys = "0.3.46" kvdb = { version = "0.7", path = "../kvdb" } kvdb-memorydb = { version = "0.7", path = "../kvdb-memorydb" } -futures = "0.3" -log = "0.4.8" +futures = "0.3.8" +log = "0.4.11" send_wrapper = "0.5.0" parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false } [dependencies.web-sys] -version = "0.3.31" +version = "0.3.46" features = [ 'console', 'Window', @@ -41,5 +41,5 @@ features = [ [dev-dependencies] console_log = "0.2.0" kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } -wasm-bindgen-test = "0.3.4" -wasm-bindgen-futures = "0.4.4" +wasm-bindgen-test = "0.3.19" +wasm-bindgen-futures = "0.4.19" From 54076a524e5d28405b3a4ad3146a108c928998a2 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Tue, 8 Dec 2020 14:27:30 +0300 Subject: [PATCH 180/359] ethereum-types/rlp should pull primitive-types/rlp (#474) --- ethereum-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 663b8da9a..e3b413f20 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -24,5 +24,5 @@ default = ["std", "rlp", "serialize"] std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] -rlp = ["impl-rlp", "ethbloom/rlp"] +rlp = ["impl-rlp", "ethbloom/rlp", 
"primitive-types/rlp"] codec = ["impl-codec", "ethbloom/codec"] From 7a54f5ba926acce75d6e5d201a7054fad1717e6a Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Wed, 9 Dec 2020 19:08:23 +0300 Subject: [PATCH 181/359] uint: optimize FromStr, make it no_std-compatible (#468) * Add from_str bench * uint: optimize FromStr, make it no_std-compatible * custom error type * fmt::Display is actually available in core * Error::source for FromHexError * uppercase consts * additional tests --- uint/Cargo.toml | 4 +- uint/benches/bigint.rs | 11 +++++ uint/src/lib.rs | 2 +- uint/src/uint.rs | 89 ++++++++++++++++++++++++++-------------- uint/tests/uint_tests.rs | 4 ++ 5 files changed, 76 insertions(+), 34 deletions(-) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 05c208ed4..b27645ac6 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -14,13 +14,13 @@ byteorder = { version = "1.3.2", default-features = false } crunchy = { version = "0.2.2", default-features = false } qc = { package = "quickcheck", version = "0.9.0", optional = true } rand = { version = "0.7.2", default-features = false, optional = true } -rustc-hex = { version = "2.0.1", default-features = false } +hex = { version = "0.4", default-features = false } static_assertions = "1.0.0" arbitrary = { version = "0.4", optional = true } [features] default = ["std"] -std = ["byteorder/std", "rustc-hex/std", "crunchy/std"] +std = ["byteorder/std", "crunchy/std", "hex/std"] quickcheck = ["qc", "rand"] [[example]] diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index c7767c7ae..a79c01d8e 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -71,6 +71,7 @@ criterion_group!( u128_mul, u128_div, from_fixed_array, + from_str, ); criterion_main!(bigint); @@ -642,3 +643,13 @@ fn from_fixed_array(c: &mut Criterion) { }) }); } + +fn from_str(c: &mut Criterion) { + c.bench_function("from_str", move |b| { + b.iter(|| { + 
black_box(U512::from_str(black_box("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")).unwrap()); + black_box(U512::from_str(black_box("0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")).unwrap()); + black_box(U512::from_str(black_box("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")).unwrap()); + }) + }); +} diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 7da1f24a5..0aedc11d8 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -19,7 +19,7 @@ pub use byteorder; pub use core as core_; #[doc(hidden)] -pub use rustc_hex; +pub use hex; #[cfg(feature = "quickcheck")] #[doc(hidden)] diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 84cc8d9a5..0d7a0dcaf 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -29,6 +29,8 @@ //! implementations for even more speed, hidden behind the `x64_arithmetic` //! feature flag. 
+use core::fmt; + /// Conversion from decimal string error #[derive(Debug, PartialEq)] pub enum FromDecStrErr { @@ -38,9 +40,8 @@ pub enum FromDecStrErr { InvalidLength, } -#[cfg(feature = "std")] -impl std::fmt::Display for FromDecStrErr { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for FromDecStrErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", @@ -55,6 +56,31 @@ impl std::fmt::Display for FromDecStrErr { #[cfg(feature = "std")] impl std::error::Error for FromDecStrErr {} +#[derive(Debug)] +pub struct FromHexError { + inner: hex::FromHexError, +} + +impl fmt::Display for FromHexError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for FromHexError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.inner) + } +} + +#[doc(hidden)] +impl From for FromHexError { + fn from(inner: hex::FromHexError) -> Self { + Self { inner } + } +} + #[macro_export] #[doc(hidden)] macro_rules! impl_map_from { @@ -1550,31 +1576,34 @@ macro_rules! construct_uint { } } - $crate::impl_std_for_uint!($name, $n_words); - // `$n_words * 8` because macro expects bytes and - // uints use 64 bit (8 byte) words - $crate::impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); - $crate::impl_arbitrary_for_uint!($name, ($n_words * 8)); - } -} - -#[cfg(feature = "std")] -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_std_for_uint { - ($name: ident, $n_words: tt) => { impl $crate::core_::str::FromStr for $name { - type Err = $crate::rustc_hex::FromHexError; + type Err = $crate::FromHexError; fn from_str(value: &str) -> $crate::core_::result::Result<$name, Self::Err> { - use $crate::rustc_hex::FromHex; - let bytes: Vec = match value.len() % 2 == 0 { - true => value.from_hex()?, - false => ("0".to_owned() + value).from_hex()?, - }; + const BYTES_LEN: usize = $n_words * 8; + const MAX_ENCODED_LEN: usize = BYTES_LEN * 2; + + let mut bytes = [0_u8; BYTES_LEN]; + + let encoded = value.as_bytes(); - if $n_words * 8 < bytes.len() { - return Err(Self::Err::InvalidHexLength); + if encoded.len() > MAX_ENCODED_LEN { + return Err($crate::hex::FromHexError::InvalidStringLength.into()); + } + + if encoded.len() % 2 == 0 { + let out = &mut bytes[BYTES_LEN - encoded.len() / 2..]; + + $crate::hex::decode_to_slice(encoded, out).map_err(Self::Err::from)?; + } else { + // Prepend '0' by overlaying our value on a scratch buffer filled with '0' characters. + let mut s = [b'0'; MAX_ENCODED_LEN]; + s[MAX_ENCODED_LEN - encoded.len()..].copy_from_slice(encoded); + let encoded = &s[MAX_ENCODED_LEN - encoded.len() - 1..]; + + let out = &mut bytes[BYTES_LEN - encoded.len() / 2..]; + + $crate::hex::decode_to_slice(encoded, out).map_err(Self::Err::from)?; } let bytes_ref: &[u8] = &bytes; @@ -1587,14 +1616,12 @@ macro_rules! impl_std_for_uint { s.parse().unwrap() } } - }; -} -#[cfg(not(feature = "std"))] -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_std_for_uint { - ($name: ident, $n_words: tt) => {}; + // `$n_words * 8` because macro expects bytes and + // uints use 64 bit (8 byte) words + $crate::impl_quickcheck_arbitrary_for_uint!($name, ($n_words * 8)); + $crate::impl_arbitrary_for_uint!($name, ($n_words * 8)); + } } #[cfg(feature = "quickcheck")] diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 9319b40e1..539f5f23f 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -134,6 +134,9 @@ fn uint256_from() { // test initializtion from string let sa = U256::from_str("0a").unwrap(); assert_eq!(e, sa); + assert_eq!(U256([0, 0, 0, 0]), U256::from_str("").unwrap()); + assert_eq!(U256([0x1, 0, 0, 0]), U256::from_str("1").unwrap()); + assert_eq!(U256([0x101, 0, 0, 0]), U256::from_str("101").unwrap()); assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_str("1010").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("12f0").unwrap()); assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_str("0000000012f0").unwrap()); @@ -145,6 +148,7 @@ fn uint256_from() { // This string contains more bits than what fits in a U256. 
assert!(U256::from_str("000000000000000000000000000000000000000000000000000000000000000000").is_err()); + assert!(U256::from_str("100000000000000000000000000000000000000000000000000000000000000000").is_err()); } #[test] From 129912cb440b98f7f20f92b16187a03e021663fa Mon Sep 17 00:00:00 2001 From: Frost Red Lee Date: Thu, 10 Dec 2020 18:57:04 +0800 Subject: [PATCH 182/359] kvdb-rocksdb: replace tempdir with tempfile(#350) (#477) --- kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/examples/memtest.rs | 2 +- kvdb-rocksdb/src/lib.rs | 22 +++++++++++----------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 63d033db6..3ad8b0d77 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -29,7 +29,7 @@ criterion = "0.3" ethereum-types = { path = "../ethereum-types" } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } rand = "0.7.2" -tempdir = "0.3.7" +tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } sysinfo = "0.15.3" ctrlc = "3.1.4" diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 3b44de4ee..54c031c5f 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -99,7 +99,7 @@ fn main() { for c in 0..=COLUMN_COUNT { config.memory_budget.insert(c, mb_per_col); } - let dir = tempdir::TempDir::new("rocksdb-example").unwrap(); + let dir = tempfile::Builder::new().prefix("rocksdb-example").tempdir().unwrap(); println!("Database is put in: {} (maybe check if it was deleted)", dir.path().to_string_lossy()); let db = Database::open(&config, &dir.path().to_string_lossy()).unwrap(); diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index d4da1da57..321e1d118 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -788,10 +788,10 @@ mod tests { use super::*; use kvdb_shared_tests as st; use std::io::{self, Read}; - use tempdir::TempDir; + use tempfile::Builder as TempfileBuilder; fn 
create(columns: u32) -> io::Result { - let tempdir = TempDir::new("")?; + let tempdir = TempfileBuilder::new().prefix("").tempdir()?; let config = DatabaseConfig::with_columns(columns); Database::open(&config, tempdir.path().to_str().expect("tempdir path is valid unicode")) } @@ -846,7 +846,7 @@ mod tests { #[test] fn secondary_db_get() -> io::Result<()> { - let primary = TempDir::new("")?; + let primary = TempfileBuilder::new().prefix("").tempdir()?; let config = DatabaseConfig::with_columns(1); let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; @@ -856,7 +856,7 @@ mod tests { db.write(transaction)?; let config = DatabaseConfig { - secondary: TempDir::new("")?.path().to_str().map(|s| s.to_string()), + secondary: TempfileBuilder::new().prefix("").tempdir()?.path().to_str().map(|s| s.to_string()), ..DatabaseConfig::with_columns(1) }; let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; @@ -866,12 +866,12 @@ mod tests { #[test] fn secondary_db_catch_up() -> io::Result<()> { - let primary = TempDir::new("")?; + let primary = TempfileBuilder::new().prefix("").tempdir()?; let config = DatabaseConfig::with_columns(1); let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; let config = DatabaseConfig { - secondary: TempDir::new("")?.path().to_str().map(|s| s.to_string()), + secondary: TempfileBuilder::new().prefix("").tempdir()?.path().to_str().map(|s| s.to_string()), ..DatabaseConfig::with_columns(1) }; let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; @@ -888,7 +888,7 @@ mod tests { #[test] fn mem_tables_size() { - let tempdir = TempDir::new("").unwrap(); + let tempdir = TempfileBuilder::new().prefix("").tempdir().unwrap(); let config = DatabaseConfig { max_open_files: 512, @@ -950,7 +950,7 @@ mod tests { let config_1 = DatabaseConfig::default(); let config_5 = 
DatabaseConfig::with_columns(5); - let tempdir = TempDir::new("").unwrap(); + let tempdir = TempfileBuilder::new().prefix("").tempdir().unwrap(); // open 1, add 4. { @@ -975,7 +975,7 @@ mod tests { let config_1 = DatabaseConfig::default(); let config_5 = DatabaseConfig::with_columns(5); - let tempdir = TempDir::new("drop_columns").unwrap(); + let tempdir = TempfileBuilder::new().prefix("drop_columns").tempdir().unwrap(); // open 5, remove 4. { @@ -997,7 +997,7 @@ mod tests { #[test] fn test_num_keys() { - let tempdir = TempDir::new("").unwrap(); + let tempdir = TempfileBuilder::new().prefix("").tempdir().unwrap(); let config = DatabaseConfig::with_columns(1); let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); @@ -1059,7 +1059,7 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 cfg.compaction.initial_file_size = 102030; cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().collect(); - let db_path = TempDir::new("config_test").expect("the OS can create tmp dirs"); + let db_path = TempfileBuilder::new().prefix("config_test").tempdir().expect("the OS can create tmp dirs"); let db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) .expect("rocksdb creates a LOG file"); From 763c967746f5379bd06428388610713e66ec29d5 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Sun, 13 Dec 2020 21:27:22 +0100 Subject: [PATCH 183/359] chore(deps): cargo upgrade parking_lot --all (#470) * chore(deps): cargo upgrade parking_lot --all * chore(deps): bump versions breaking change. 
* chore: update changelog * kvdb * kvdb-memorydb * kvdb-rocksdb * parity-util-mem * fix nits * fix: kvdb-web add missing changelog entry * fix: bad merge * fix more nits: use breaking label * [kvdb-memorydb]: add `wasm-bindgen` feature flag * grumbles: remove `wasm-bindgen` feature flag * Add hack only in `kvdb-web` * Remove feature flag `wasm-bindgen` from `kvdb-memorydb` --- kvdb-memorydb/CHANGELOG.md | 2 ++ kvdb-memorydb/Cargo.toml | 13 ++++++++----- kvdb-rocksdb/CHANGELOG.md | 6 ++++-- kvdb-rocksdb/Cargo.toml | 10 +++++----- kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb-web/CHANGELOG.md | 2 ++ kvdb-web/Cargo.toml | 14 +++++++++----- kvdb/CHANGELOG.md | 2 ++ kvdb/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 2 ++ parity-util-mem/Cargo.toml | 4 ++-- 11 files changed, 40 insertions(+), 23 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 89b2a4ed0..a74210497 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) ## [0.7.0] - 2020-06-24 - Updated `kvdb` to 0.7. 
[#402](https://github.com/paritytech/parity-common/pull/402) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 1fc78291e..7cf540bd9 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,9 +8,12 @@ license = "MIT OR Apache-2.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false, features = ["std"] } -parking_lot = "0.10.0" -kvdb = { version = "0.7", path = "../kvdb" } +parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false, features = ["std"] } +parking_lot = "0.11.1" +kvdb = { version = "0.8", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } + +[features] +default = [] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index acf6282fb..a2f14fcd7 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,10 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) ## [0.9.1] - 2020-08-26 - Updated rocksdb to 0.15. [#424](https://github.com/paritytech/parity-common/pull/424) -- Set `format_version` to 5. [#395](https://github.com/paritytech/parity-common/pull/395) +- Set `format_version` to 5. [#395](https://github.com/paritytech/parity-common/pull/395) ## [0.9.0] - 2020-06-24 - Updated `kvdb` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) @@ -18,7 +20,7 @@ The format is based on [Keep a Changelog]. 
### Breaking - Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) - Rename and optimize prefix iteration. [#365](https://github.com/paritytech/parity-common/pull/365) -- Added Secondary Instance API. [#384](https://github.com/paritytech/parity-common/pull/384) +- Added Secondary Instance API. [#384](https://github.com/paritytech/parity-common/pull/384) ## [0.7.0] - 2020-03-16 - Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 3ad8b0d77..e623914d9 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.9.1" +version = "0.10.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,20 +14,20 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.4" -kvdb = { path = "../kvdb", version = "0.7" } +kvdb = { path = "../kvdb", version = "0.8" } log = "0.4.8" num_cpus = "1.10.1" -parking_lot = "0.10.0" +parking_lot = "0.11.1" regex = "1.3.1" rocksdb = { version = "0.15", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } rand = "0.7.2" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 4e531d5fc..14693f52d 100644 --- 
a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "kvdb-shared-tests" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.7" } +kvdb = { path = "../kvdb", version = "0.8" } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 64dd9ba34..ba028010b 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) ## [0.7.0] - 2020-07-06 - Updated `kvdb` to 0.7.0 [#404](https://github.com/paritytech/parity-common/pull/404) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 70080ae9a..7790262f4 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,12 +11,16 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.69" js-sys = "0.3.46" -kvdb = { version = "0.7", path = "../kvdb" } -kvdb-memorydb = { version = "0.7", path = "../kvdb-memorydb" } +kvdb = { version = "0.8", path = "../kvdb" } +kvdb-memorydb = { version = "0.8", path = "../kvdb-memorydb" } futures = "0.3.8" log = "0.4.11" send_wrapper = "0.5.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false } +# TODO: https://github.com/paritytech/parity-common/issues/479 +# This is hack to enable `wasm-bindgen` feature of `parking_lot` 
in other dependencies. +# Thus, it's not direct dependency and do not remove until a proper fix exists. +parking_lot = { version = "0.11.1", features = ["wasm-bindgen"] } [dependencies.web-sys] version = "0.3.46" @@ -40,6 +44,6 @@ features = [ [dev-dependencies] console_log = "0.2.0" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.5" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } wasm-bindgen-test = "0.3.19" wasm-bindgen-futures = "0.4.19" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index e67725cd7..3577a4a82 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `parity-util-mem` to 0.8. [#470](https://github.com/paritytech/parity-common/pull/470) ## [0.7.0] - 2020-06-24 - Updated `parity-util-mem` to 0.7. [#402](https://github.com/paritytech/parity-common/pull/402) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 1accfb38b..4cf0c7657 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -9,4 +9,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.7", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index b8dfe28f9..65d4b0860 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,8 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) + ### Breaking - Updated `ethereum-types` to 0.10. 
[#463](https://github.com/paritytech/parity-common/pull/463) +- Updated `parking_lot` to 0.11.1. [#470](https://github.com/paritytech/parity-common/pull/470) ## [0.7.0] - 2020-06-24 - Added `const_size` to `MallocSizeOf` to optimize it for flat collections. [#398](https://github.com/paritytech/parity-common/pull/398) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index f2ccbfb09..09a8d1c7c 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -27,7 +27,7 @@ impl-trait-for-tuples = "0.1.3" smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } -parking_lot = { version = "0.10.0", optional = true } +parking_lot = { version = "0.11.1", optional = true } primitive-types = { version = "0.8", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] From 935a92cd28130be0a4af587ae9d4d057341cc820 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Wed, 23 Dec 2020 23:19:32 +0300 Subject: [PATCH 184/359] Bump bytes to 1.0 (#482) --- rlp/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 9a06199eb..6574b3ba2 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -bytes = { version = "0.6", default-features = false } +bytes = { version = "1", default-features = false } rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] From da4262d6d9434b745efe395563b39e5340089f25 Mon Sep 17 00:00:00 2001 From: Sam Wilson <57262657+SamWilsn@users.noreply.github.com> Date: Fri, 25 Dec 2020 15:33:17 -0500 
Subject: [PATCH 185/359] Implement Num from num-traits (#480) --- ethereum-types/Cargo.toml | 1 + ethereum-types/src/lib.rs | 2 + primitive-types/Cargo.toml | 2 + primitive-types/impls/num-traits/Cargo.toml | 16 +++ primitive-types/impls/num-traits/src/lib.rs | 49 +++++++++ primitive-types/src/lib.rs | 11 ++ uint/src/uint.rs | 108 ++++++++++++++++++++ 7 files changed, 189 insertions(+) create mode 100644 primitive-types/impls/num-traits/Cargo.toml create mode 100644 primitive-types/impls/num-traits/src/lib.rs diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index e3b413f20..544d6fa69 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -26,3 +26,4 @@ serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] rlp = ["impl-rlp", "ethbloom/rlp", "primitive-types/rlp"] codec = ["impl-codec", "ethbloom/codec"] +num-traits = ["primitive-types/num-traits"] diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index d94ae57c0..ce84e3731 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -13,6 +13,8 @@ mod uint; pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; pub use hash::{BigEndianHash, H128, H160, H256, H264, H32, H512, H520, H64}; +#[cfg(feature = "num-traits")] +pub use primitive_types::{FromStrRadixErr, FromStrRadixErrKind}; pub use uint::{FromDecStrErr, U128, U256, U512, U64}; pub type Address = H160; diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index d881bce56..4c691d812 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -12,6 +12,7 @@ fixed-hash = { version = "0.6", path = "../fixed-hash", default-features = false uint = { version = "0.8.3", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = 
"impls/codec", default-features = false, optional = true } +impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } scale-info = { version = "0.4", features = ["derive"], default-features = false, optional = true } @@ -26,6 +27,7 @@ codec = ["impl-codec"] rlp = ["impl-rlp"] arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] fp-conversion = ["std"] +num-traits = ["impl-num-traits"] [[test]] name = "scale_info" diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml new file mode 100644 index 000000000..fed7c898e --- /dev/null +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "impl-num-traits" +version = "0.1.0" +authors = ["Parity Technologies "] +license = "MIT OR Apache-2.0" +homepage = "https://github.com/paritytech/parity-common" +description = "num-traits implementation for uint." +edition = "2018" + +[dependencies] +num-traits = { version = "0.2", default-features = false } +uint = { version = "0.8.5", path = "../../../uint", default-features = false } + +[features] +default = ["std"] +std = ["num-traits/std", "uint/std"] diff --git a/primitive-types/impls/num-traits/src/lib.rs b/primitive-types/impls/num-traits/src/lib.rs new file mode 100644 index 000000000..d5e5e5d8b --- /dev/null +++ b/primitive-types/impls/num-traits/src/lib.rs @@ -0,0 +1,49 @@ +// Copyright 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! num-traits support for uint. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[doc(hidden)] +pub use num_traits; + +pub use uint::{FromStrRadixErr, FromStrRadixErrKind}; + +/// Add num-traits support to an integer created by `construct_uint!`. 
+#[macro_export] +macro_rules! impl_uint_num_traits { + ($name: ident, $len: expr) => { + impl $crate::num_traits::identities::Zero for $name { + #[inline] + fn zero() -> Self { + Self::zero() + } + + #[inline] + fn is_zero(&self) -> bool { + self.is_zero() + } + } + + impl $crate::num_traits::identities::One for $name { + #[inline] + fn one() -> Self { + Self::one() + } + } + + impl $crate::num_traits::Num for $name { + type FromStrRadixErr = $crate::FromStrRadixErr; + + fn from_str_radix(txt: &str, radix: u32) -> Result { + Self::from_str_radix(txt, radix) + } + } + }; +} diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index fe1eb4ac7..bf9acbdc3 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -22,6 +22,7 @@ use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; #[cfg(feature = "scale-info")] use scale_info::TypeInfo; use uint::{construct_uint, uint_full_mul_reg}; +pub use uint::{FromStrRadixErr, FromStrRadixErrKind}; /// Error type for conversion. #[derive(Debug, PartialEq, Eq)] @@ -68,6 +69,16 @@ construct_fixed_hash! { pub struct H512(64); } +#[cfg(feature = "num-traits")] +mod num_traits { + use super::*; + use impl_num_traits::impl_uint_num_traits; + + impl_uint_num_traits!(U128, 2); + impl_uint_num_traits!(U256, 4); + impl_uint_num_traits!(U512, 8); +} + #[cfg(feature = "impl-serde")] mod serde { use super::*; diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 0d7a0dcaf..e6b99e537 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -31,6 +31,102 @@ use core::fmt; +/// A list of error categories encountered when parsing numbers. +#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] +#[non_exhaustive] +pub enum FromStrRadixErrKind { + /// A character in the input string is not valid for the given radix. + InvalidCharacter, + + /// The input length is not valid for the given radix. + InvalidLength, + + /// The given radix is not supported. 
+ UnsupportedRadix, +} + +#[derive(Debug)] +enum FromStrRadixErrSrc { + Hex(FromHexError), + Dec(FromDecStrErr), +} + +impl fmt::Display for FromStrRadixErrSrc { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + FromStrRadixErrSrc::Dec(d) => write!(f, "{}", d), + FromStrRadixErrSrc::Hex(h) => write!(f, "{}", h), + } + } +} + +/// The error type for parsing numbers from strings. +#[derive(Debug)] +pub struct FromStrRadixErr { + kind: FromStrRadixErrKind, + source: Option, +} + +impl FromStrRadixErr { + #[doc(hidden)] + pub fn unsupported() -> Self { + Self { kind: FromStrRadixErrKind::UnsupportedRadix, source: None } + } + + /// Returns the corresponding `FromStrRadixErrKind` for this error. + pub fn kind(&self) -> FromStrRadixErrKind { + self.kind + } +} + +impl fmt::Display for FromStrRadixErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref src) = self.source { + return write!(f, "{}", src); + } + + match self.kind { + FromStrRadixErrKind::UnsupportedRadix => write!(f, "the given radix is not supported"), + FromStrRadixErrKind::InvalidCharacter => write!(f, "input contains an invalid character"), + FromStrRadixErrKind::InvalidLength => write!(f, "length not supported for radix or type"), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for FromStrRadixErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self.source { + Some(FromStrRadixErrSrc::Dec(ref d)) => Some(d), + Some(FromStrRadixErrSrc::Hex(ref h)) => Some(h), + None => None, + } + } +} + +impl From for FromStrRadixErr { + fn from(e: FromDecStrErr) -> Self { + let kind = match e { + FromDecStrErr::InvalidCharacter => FromStrRadixErrKind::InvalidCharacter, + FromDecStrErr::InvalidLength => FromStrRadixErrKind::InvalidLength, + }; + + Self { kind, source: Some(FromStrRadixErrSrc::Dec(e)) } + } +} + +impl From for FromStrRadixErr { + fn from(e: FromHexError) -> Self { + let kind = match e.inner { + 
hex::FromHexError::InvalidHexCharacter { .. } => FromStrRadixErrKind::InvalidCharacter, + hex::FromHexError::InvalidStringLength => FromStrRadixErrKind::InvalidLength, + hex::FromHexError::OddLength => FromStrRadixErrKind::InvalidLength, + }; + + Self { kind, source: Some(FromStrRadixErrSrc::Hex(e)) } + } +} + /// Conversion from decimal string error #[derive(Debug, PartialEq)] pub enum FromDecStrErr { @@ -493,6 +589,18 @@ macro_rules! construct_uint { /// Maximum value. pub const MAX: $name = $name([u64::max_value(); $n_words]); + /// Converts a string slice in a given base to an integer. Only supports radixes of 10 + /// and 16. + pub fn from_str_radix(txt: &str, radix: u32) -> Result { + let parsed = match radix { + 10 => Self::from_dec_str(txt)?, + 16 => core::str::FromStr::from_str(txt)?, + _ => return Err($crate::FromStrRadixErr::unsupported()), + }; + + Ok(parsed) + } + /// Convert from a decimal string. pub fn from_dec_str(value: &str) -> $crate::core_::result::Result { if !value.bytes().all(|b| b >= 48 && b <= 57) { From 65609490cd017f7a8e5fe5c5b20a49a314371911 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 28 Dec 2020 13:01:08 +0100 Subject: [PATCH 186/359] parity-crypto: remove UB test (#484) * parity-crypto: remove UB test * rlp: fix unused import --- parity-crypto/src/hmac/test.rs | 18 ------------------ rlp/src/traits.rs | 2 -- 2 files changed, 20 deletions(-) diff --git a/parity-crypto/src/hmac/test.rs b/parity-crypto/src/hmac/test.rs index a13174784..8f71bd4a9 100644 --- a/parity-crypto/src/hmac/test.rs +++ b/parity-crypto/src/hmac/test.rs @@ -232,21 +232,3 @@ fn ietf_test_vectors() { ), ); } - -#[test] -fn secrets_are_zeroed_on_drop() { - let ptr: *const KeyInner; - let zeros = KeyInner::Sha256(DisposableBox::from_slice(&[0u8; 6][..])); - let expected = KeyInner::Sha256(DisposableBox::from_slice(b"sikrit")); - { - let secret = b"sikrit"; - let signing_key = SigKey::sha256(secret); - ptr = &signing_key.0; - unsafe { - assert_eq!(*ptr, 
expected); - } - } - unsafe { - assert_eq!(*ptr, zeros); - } -} diff --git a/rlp/src/traits.rs b/rlp/src/traits.rs index e96cf4c46..b83b2ddcb 100644 --- a/rlp/src/traits.rs +++ b/rlp/src/traits.rs @@ -7,8 +7,6 @@ // except according to those terms. //! Common RLP traits -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; use bytes::BytesMut; use crate::{error::DecoderError, rlpin::Rlp, stream::RlpStream}; From f9d468edf367922e2300e042222fd305fa0424c4 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 28 Dec 2020 13:02:07 +0100 Subject: [PATCH 187/359] parity-crypto: upgrade deps (#483) --- parity-crypto/Cargo.toml | 32 ++++++++++++++++---------------- parity-crypto/src/aes.rs | 4 ++-- parity-crypto/src/digest.rs | 12 ++++++------ parity-crypto/src/error.rs | 6 +++--- parity-crypto/src/hmac/mod.rs | 14 +++++++------- parity-crypto/src/pbkdf2/mod.rs | 4 ++-- 6 files changed, 36 insertions(+), 36 deletions(-) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 33ce92f6f..97bfe1581 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -14,26 +14,26 @@ harness = false required-features = ["publickey"] [dependencies] -aes = "0.4.0" -aes-ctr = "0.4.0" -block-modes = "0.5.0" -digest = "0.8" +aes = "0.6.0" +aes-ctr = "0.6.0" +block-modes = "0.7.0" +digest = "0.9.0" ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } -hmac = "0.7" -lazy_static = { version = "1.0", optional = true } -pbkdf2 = "0.3.0" -rand = "0.7.2" -ripemd160 = "0.8.0" +hmac = "0.10.1" +lazy_static = { version = "1.4.0", optional = true } +pbkdf2 = "0.6.0" +rand = "0.7.3" +ripemd160 = "0.9.1" rustc-hex = { version = "2.1.0", default-features = false, optional = true } -scrypt = { version = "0.2.0", default-features = false } -secp256k1 = { version = "0.19", optional = true, features = ["global-context", "recovery", "rand-std"] } -sha2 = "0.8.0" -subtle = "2.2.1" -tiny-keccak = { version = "2.0", features = ["keccak"] } -zeroize = { 
version = "1.0.0", default-features = false } +scrypt = { version = "0.5.0" } +secp256k1 = { version = "0.19.0", optional = true, features = ["global-context", "recovery", "rand-std"] } +sha2 = "0.9.2" +subtle = "2.4.0" +tiny-keccak = { version = "2.0.2", features = ["keccak"] } +zeroize = { version = "1.2.0", default-features = false } [dev-dependencies] -criterion = "0.3.0" +criterion = "0.3.3" hex-literal = "0.3.1" [features] diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs index 3ba869958..c7c860183 100644 --- a/parity-crypto/src/aes.rs +++ b/parity-crypto/src/aes.rs @@ -6,9 +6,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use aes::block_cipher::generic_array::GenericArray; +use aes::cipher::generic_array::GenericArray; use aes::{Aes128, Aes256}; -use aes_ctr::stream_cipher::{NewStreamCipher, SyncStreamCipher}; +use aes_ctr::cipher::stream::{NewStreamCipher, SyncStreamCipher}; use block_modes::{ block_padding::{Pkcs7, ZeroPadding}, BlockMode, Cbc, Ecb, diff --git a/parity-crypto/src/digest.rs b/parity-crypto/src/digest.rs index 9ec5edf56..759613f2f 100644 --- a/parity-crypto/src/digest.rs +++ b/parity-crypto/src/digest.rs @@ -93,17 +93,17 @@ impl Hasher { impl Hasher { pub fn update(&mut self, data: &[u8]) { match self.0 { - Inner::Sha256(ref mut ctx) => ctx.input(data), - Inner::Sha512(ref mut ctx) => ctx.input(data), - Inner::Ripemd160(ref mut ctx) => ctx.input(data), + Inner::Sha256(ref mut ctx) => ctx.update(data), + Inner::Sha512(ref mut ctx) => ctx.update(data), + Inner::Ripemd160(ref mut ctx) => ctx.update(data), } } pub fn finish(self) -> Digest { match self.0 { - Inner::Sha256(ctx) => Digest(InnerDigest::Sha256(ctx.result()), PhantomData), - Inner::Sha512(ctx) => Digest(InnerDigest::Sha512(ctx.result()), PhantomData), - Inner::Ripemd160(ctx) => Digest(InnerDigest::Ripemd160(ctx.result()), PhantomData), + Inner::Sha256(ctx) => Digest(InnerDigest::Sha256(ctx.finalize()), 
PhantomData), + Inner::Sha512(ctx) => Digest(InnerDigest::Sha512(ctx.finalize()), PhantomData), + Inner::Ripemd160(ctx) => Digest(InnerDigest::Ripemd160(ctx.finalize()), PhantomData), } } } diff --git a/parity-crypto/src/error.rs b/parity-crypto/src/error.rs index e1a3cceb0..6f413a247 100644 --- a/parity-crypto/src/error.rs +++ b/parity-crypto/src/error.rs @@ -30,7 +30,7 @@ pub struct SymmError(PrivSymmErr); #[derive(Debug)] enum PrivSymmErr { BlockMode(block_modes::BlockModeError), - KeyStream(aes_ctr::stream_cipher::LoopError), + KeyStream(aes_ctr::cipher::stream::LoopError), InvalidKeyLength(block_modes::InvalidKeyIvLength), } @@ -111,8 +111,8 @@ impl From for SymmError { } } -impl From for SymmError { - fn from(e: aes_ctr::stream_cipher::LoopError) -> SymmError { +impl From for SymmError { + fn from(e: aes_ctr::cipher::stream::LoopError) -> SymmError { SymmError(PrivSymmErr::KeyStream(e)) } } diff --git a/parity-crypto/src/hmac/mod.rs b/parity-crypto/src/hmac/mod.rs index 6d606fdd7..ca20ae1c1 100644 --- a/parity-crypto/src/hmac/mod.rs +++ b/parity-crypto/src/hmac/mod.rs @@ -13,7 +13,7 @@ use digest::generic_array::{ typenum::{U32, U64}, GenericArray, }; -use hmac::{Hmac, Mac as _}; +use hmac::{Hmac, Mac as _, NewMac as _}; use zeroize::Zeroize; use crate::digest::{Sha256, Sha512}; @@ -113,15 +113,15 @@ impl Signer { pub fn update(&mut self, data: &[u8]) { match &mut self.0 { - SignerInner::Sha256(hmac) => hmac.input(data), - SignerInner::Sha512(hmac) => hmac.input(data), + SignerInner::Sha256(hmac) => hmac.update(data), + SignerInner::Sha512(hmac) => hmac.update(data), } } pub fn sign(self) -> Signature { match self.0 { - SignerInner::Sha256(hmac) => Signature(HashInner::Sha256(hmac.result().code()), PhantomData), - SignerInner::Sha512(hmac) => Signature(HashInner::Sha512(hmac.result().code()), PhantomData), + SignerInner::Sha256(hmac) => Signature(HashInner::Sha256(hmac.finalize().into_bytes()), PhantomData), + SignerInner::Sha512(hmac) => 
Signature(HashInner::Sha512(hmac.finalize().into_bytes()), PhantomData), } } } @@ -146,12 +146,12 @@ pub fn verify(key: &VerifyKey, data: &[u8], sig: &[u8]) -> bool { match &key.0 { KeyInner::Sha256(key_bytes) => { let mut ctx = Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed"); - ctx.input(data); + ctx.update(data); ctx.verify(sig).is_ok() } KeyInner::Sha512(key_bytes) => { let mut ctx = Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed"); - ctx.input(data); + ctx.update(data); ctx.verify(sig).is_ok() } } diff --git a/parity-crypto/src/pbkdf2/mod.rs b/parity-crypto/src/pbkdf2/mod.rs index a3a06e867..c9f1bd565 100644 --- a/parity-crypto/src/pbkdf2/mod.rs +++ b/parity-crypto/src/pbkdf2/mod.rs @@ -10,11 +10,11 @@ pub struct Salt<'a>(pub &'a [u8]); pub struct Secret<'a>(pub &'a [u8]); pub fn sha256(iter: u32, salt: Salt<'_>, sec: Secret<'_>, out: &mut [u8; 32]) { - pbkdf2::pbkdf2::>(sec.0, salt.0, iter as usize, out) + pbkdf2::pbkdf2::>(sec.0, salt.0, iter, out) } pub fn sha512(iter: u32, salt: Salt<'_>, sec: Secret<'_>, out: &mut [u8; 64]) { - pbkdf2::pbkdf2::>(sec.0, salt.0, iter as usize, out) + pbkdf2::pbkdf2::>(sec.0, salt.0, iter, out) } #[cfg(test)] From e64bf1a4160bed44cd6062e4a41dc7a78ca0003b Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Tue, 29 Dec 2020 22:37:24 +0800 Subject: [PATCH 188/359] update some dev-dependencies (#493) Signed-off-by: koushiro --- keccak-hash/Cargo.toml | 2 +- keccak-hash/src/lib.rs | 2 +- rlp-derive/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 9f1e4523b..54e75b1aa 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -13,7 +13,7 @@ tiny-keccak = { version = "2.0", features = ["keccak"] } primitive-types = { path = "../primitive-types", version = "0.8", default-features = false } [dev-dependencies] -tempdir = "0.3.7" +tempfile = "3.1.0" criterion = "0.3.0" [features] diff --git 
a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index dbad92af5..e01f6156d 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -189,7 +189,7 @@ mod tests { use std::io::{BufReader, Write}; // given - let tmpdir = tempdir::TempDir::new("keccak").unwrap(); + let tmpdir = tempfile::Builder::new().prefix("keccak").tempdir().unwrap(); let mut path = tmpdir.path().to_owned(); path.push("should_keccak_a_file"); // Prepare file diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index 5059d6d02..16f7e010e 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -16,4 +16,4 @@ quote = "1.0.2" proc-macro2 = "1.0.8" [dev-dependencies] -rlp = "0.4.4" +rlp = { version = "0.5.0", path = "../rlp" } From 0264ebff65ec61c7507be272443128f909640052 Mon Sep 17 00:00:00 2001 From: Georgios Konstantopoulos Date: Wed, 30 Dec 2020 10:33:30 +0200 Subject: [PATCH 189/359] fix: make from_str parse 0x-prefixed strings (#487) * fix: make from_str parse 0x-prefixed strings * fix(uint): make from_str parse 0x-prefixed strings * chore: address review styling comments * fix: tabs instead of spaces * chore: cargo fmt * fix: use strip_prefix instead of starts_with --- ethereum-types/src/hash.rs | 5 +++++ fixed-hash/src/hash.rs | 1 + uint/src/uint.rs | 1 + uint/tests/uint_tests.rs | 2 ++ 4 files changed, 9 insertions(+) diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 68f435756..596f0fede 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -134,6 +134,11 @@ mod tests { } } + #[test] + fn test_parse_0x() { + assert!("0x0000000000000000000000000000000000000000000000000000000000000000".parse::().is_ok()) + } + #[test] fn test_serialize_invalid() { assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"") diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index a971b4136..c84d6bb35 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -588,6 +588,7 @@ 
macro_rules! impl_rustc_hex_for_fixed_hash { /// - When encountering invalid non hex-digits /// - Upon empty string input or invalid input length in general fn from_str(input: &str) -> $crate::core_::result::Result<$name, $crate::rustc_hex::FromHexError> { + let input = input.strip_prefix("0x").unwrap_or(input); let mut iter = $crate::rustc_hex::FromHexIter::new(input); let mut result = Self::zero(); for byte in result.as_mut() { diff --git a/uint/src/uint.rs b/uint/src/uint.rs index e6b99e537..3d625368e 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1688,6 +1688,7 @@ macro_rules! construct_uint { type Err = $crate::FromHexError; fn from_str(value: &str) -> $crate::core_::result::Result<$name, Self::Err> { + let value = value.strip_prefix("0x").unwrap_or(value); const BYTES_LEN: usize = $n_words * 8; const MAX_ENCODED_LEN: usize = BYTES_LEN * 2; diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 539f5f23f..e9f441ac9 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -133,6 +133,8 @@ fn uint256_from() { // test initializtion from string let sa = U256::from_str("0a").unwrap(); + let sa2 = U256::from_str("0x0a").unwrap(); + assert_eq!(sa2, sa); assert_eq!(e, sa); assert_eq!(U256([0, 0, 0, 0]), U256::from_str("").unwrap()); assert_eq!(U256([0x1, 0, 0, 0]), U256::from_str("1").unwrap()); From b52051d0fd8ef099cd79d6ed5011124958dd340a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Dec 2020 08:35:58 +0000 Subject: [PATCH 190/359] build(deps): update impl-trait-for-tuples requirement (#490) Updates the requirements on [impl-trait-for-tuples](https://github.com/bkchr/impl-trait-for-tuples) to permit the latest version. 
- [Release notes](https://github.com/bkchr/impl-trait-for-tuples/releases) - [Commits](https://github.com/bkchr/impl-trait-for-tuples/commits/v0.2) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 09a8d1c7c..6ab5f3e54 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -23,7 +23,7 @@ hashbrown = { version = "0.9", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } -impl-trait-for-tuples = "0.1.3" +impl-trait-for-tuples = "0.2.0" smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } From 36004c91fde89c9d1d401f016bfc1138985da0ad Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 1 Jan 2021 14:42:51 +0100 Subject: [PATCH 191/359] primitive-types: address nits of #480 (#485) * primitive-types: address nits of #480 * fix ethereum-types --- ethereum-types/src/lib.rs | 4 +--- ethereum-types/src/uint.rs | 2 +- primitive-types/impls/num-traits/Cargo.toml | 4 ---- primitive-types/impls/num-traits/src/lib.rs | 7 ++++--- primitive-types/src/lib.rs | 1 - 5 files changed, 6 insertions(+), 12 deletions(-) diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index ce84e3731..56963f28d 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -13,9 +13,7 @@ mod uint; pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; pub use hash::{BigEndianHash, H128, H160, H256, H264, H32, H512, H520, H64}; -#[cfg(feature = "num-traits")] -pub use primitive_types::{FromStrRadixErr, FromStrRadixErrKind}; -pub use uint::{FromDecStrErr, U128, U256, U512, U64}; +pub use uint::{FromDecStrErr, 
FromStrRadixErr, FromStrRadixErrKind, U128, U256, U512, U64}; pub type Address = H160; pub type Secret = H256; diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index 440221c16..eaf9bbf49 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -14,7 +14,7 @@ use impl_rlp::impl_uint_rlp; use impl_serde::impl_uint_serde; use uint_crate::*; -pub use uint_crate::FromDecStrErr; +pub use uint_crate::{FromDecStrErr, FromStrRadixErr, FromStrRadixErrKind}; construct_uint! { /// Unsigned 64-bit integer. diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index fed7c898e..8e0fef9f3 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -10,7 +10,3 @@ edition = "2018" [dependencies] num-traits = { version = "0.2", default-features = false } uint = { version = "0.8.5", path = "../../../uint", default-features = false } - -[features] -default = ["std"] -std = ["num-traits/std", "uint/std"] diff --git a/primitive-types/impls/num-traits/src/lib.rs b/primitive-types/impls/num-traits/src/lib.rs index d5e5e5d8b..094447592 100644 --- a/primitive-types/impls/num-traits/src/lib.rs +++ b/primitive-types/impls/num-traits/src/lib.rs @@ -8,12 +8,13 @@ //! num-traits support for uint. -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] #[doc(hidden)] pub use num_traits; -pub use uint::{FromStrRadixErr, FromStrRadixErrKind}; +#[doc(hidden)] +pub use uint; /// Add num-traits support to an integer created by `construct_uint!`. #[macro_export] @@ -39,7 +40,7 @@ macro_rules! 
impl_uint_num_traits { } impl $crate::num_traits::Num for $name { - type FromStrRadixErr = $crate::FromStrRadixErr; + type FromStrRadixErr = $crate::uint::FromStrRadixErr; fn from_str_radix(txt: &str, radix: u32) -> Result { Self::from_str_radix(txt, radix) diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index bf9acbdc3..696ac6898 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -22,7 +22,6 @@ use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; #[cfg(feature = "scale-info")] use scale_info::TypeInfo; use uint::{construct_uint, uint_full_mul_reg}; -pub use uint::{FromStrRadixErr, FromStrRadixErrKind}; /// Error type for conversion. #[derive(Debug, PartialEq, Eq)] From f98a93d7a26703732f7071748c250e39ee82205d Mon Sep 17 00:00:00 2001 From: David Date: Mon, 4 Jan 2021 09:50:17 +0000 Subject: [PATCH 192/359] Remove deprecated FromStr/TryFrom impls for Secret (#495) * Remove deprecated FromStr/TryFrom impls for Secret * update CHANGELOG --- parity-crypto/CHANGELOG.md | 1 + parity-crypto/src/publickey/secret_key.rs | 31 +---------------------- 2 files changed, 2 insertions(+), 30 deletions(-) diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 42a24d6a5..2d8a372c0 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog]. ### Breaking - Bump `rust-secp256k1` to v0.19, always allow zero signatures. [#438](https://github.com/paritytech/parity-common/pull/438) - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) +- Remove deprecated trait impls `FromStr`/`TryFrom` for `Secret` [#495](https://github.com/paritytech/parity-common/pull/495) ## [0.6.2] - 2020-06-19 - Put `Secret` memory on heap. 
[#400](https://github.com/paritytech/parity-common/pull/400) diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs index 269afdf3e..80a37a590 100644 --- a/parity-crypto/src/publickey/secret_key.rs +++ b/parity-crypto/src/publickey/secret_key.rs @@ -138,7 +138,7 @@ impl Secret { pub fn dec(&mut self) -> Result<(), Error> { match self.is_zero() { true => { - *self = Secret::try_from(super::MINUS_ONE_KEY) + *self = Self::copy_from_slice(&super::MINUS_ONE_KEY) .expect("Constructing a secret key from a known-good constant works; qed."); Ok(()) } @@ -213,14 +213,6 @@ impl Secret { } } -#[deprecated(since = "0.6.2", note = "please use `copy_from_str` instead, input is not zeroized")] -impl FromStr for Secret { - type Err = Error; - fn from_str(s: &str) -> Result { - Ok(H256::from_str(s).map_err(|e| Error::Custom(format!("{:?}", e)))?.into()) - } -} - impl From<[u8; 32]> for Secret { #[inline(always)] fn from(mut k: [u8; 32]) -> Self { @@ -239,27 +231,6 @@ impl From for Secret { } } -#[deprecated(since = "0.6.2", note = "please use `copy_from_str` instead, input is not zeroized")] -impl TryFrom<&str> for Secret { - type Error = Error; - - fn try_from(s: &str) -> Result { - s.parse().map_err(|e| Error::Custom(format!("{:?}", e))) - } -} - -#[deprecated(since = "0.6.2", note = "please use `copy_from_slice` instead, input is not zeroized")] -impl TryFrom<&[u8]> for Secret { - type Error = Error; - - fn try_from(b: &[u8]) -> Result { - if b.len() != SECP256K1_SECRET_KEY_SIZE { - return Err(Error::InvalidSecretKey); - } - Ok(Self { inner: Box::new(H256::from_slice(b)) }) - } -} - impl From for Secret { #[inline(always)] fn from(key: key::SecretKey) -> Self { From e7a708ac21a191c2c0131022573c6c4068306149 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 11:59:42 +0100 Subject: [PATCH 193/359] build(deps): update secp256k1 requirement from 0.19.0 to 0.20.0 (#496) 
Updates the requirements on [secp256k1](https://github.com/rust-bitcoin/rust-secp256k1) to permit the latest version. - [Release notes](https://github.com/rust-bitcoin/rust-secp256k1/releases) - [Changelog](https://github.com/rust-bitcoin/rust-secp256k1/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-bitcoin/rust-secp256k1/commits) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andronik Ordian --- parity-crypto/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 97bfe1581..6a2f1a1fb 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -26,7 +26,7 @@ rand = "0.7.3" ripemd160 = "0.9.1" rustc-hex = { version = "2.1.0", default-features = false, optional = true } scrypt = { version = "0.5.0" } -secp256k1 = { version = "0.19.0", optional = true, features = ["global-context", "recovery", "rand-std"] } +secp256k1 = { version = "0.20.0", optional = true, features = ["global-context", "recovery", "rand-std"] } sha2 = "0.9.2" subtle = "2.4.0" tiny-keccak = { version = "2.0.2", features = ["keccak"] } From a7003bce517cf5703ffb235a00005bd65c08cf32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 13:44:54 +0000 Subject: [PATCH 194/359] build(deps): update smallvec requirement from 0.6.10 to 1.6.0 (#494) Updates the requirements on [smallvec](https://github.com/servo/rust-smallvec) to permit the latest version. 
- [Release notes](https://github.com/servo/rust-smallvec/releases) - [Commits](https://github.com/servo/rust-smallvec/commits/v1.6.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andronik Ordian --- transaction-pool/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index a0de36b27..fcd193618 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] log = "0.4.8" -smallvec = "0.6.10" +smallvec = "1.6.0" trace-time = { path = "../trace-time", version = "0.1" } [dev-dependencies] From 2c8cd78d9b03fba2d096abb8b90dc15a7b74d7e5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 14:49:58 +0000 Subject: [PATCH 195/359] build(deps): update rand requirement from 0.7.2 to 0.8.0 (#488) * build(deps): update rand requirement from 0.7.2 to 0.8.0 Updates the requirements on [rand](https://github.com/rust-random/rand) to permit the latest version. 
- [Release notes](https://github.com/rust-random/rand/releases) - [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-random/rand/compare/0.7.3...0.8.0) Signed-off-by: dependabot[bot] * Seed from u64 * uint: use rand 0.7 for quickcheck feature * kvdb-rocksdb: fix compilation for benches * parity-crypto: remove unused dep and fix a warning * cargo fmt and another unused dep Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andronik Ordian Co-authored-by: David Palm --- ethbloom/Cargo.toml | 2 +- fixed-hash/Cargo.toml | 3 ++- fixed-hash/src/tests.rs | 19 ++----------------- kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/benches/bench_read_perf.rs | 2 +- parity-crypto/Cargo.toml | 1 - parity-crypto/src/publickey/secret_key.rs | 1 - transaction-pool/Cargo.toml | 1 - uint/Cargo.toml | 4 ++-- uint/src/lib.rs | 2 +- uint/src/uint.rs | 4 ++-- 11 files changed, 12 insertions(+), 29 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 7d21efd5d..c8223ecf3 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -19,7 +19,7 @@ impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", defau [dev-dependencies] criterion = "0.3.0" -rand = "0.7.2" +rand = "0.8.0" hex-literal = "0.3.1" [features] diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index c8e85585f..3fa37cafc 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -16,7 +16,7 @@ features = ["quickcheck", "api-dummy"] [dependencies] byteorder = { version = "1.3.2", optional = true, default-features = false } quickcheck = { version = "0.9.0", optional = true } -rand = { version = "0.7.2", optional = true, default-features = false } +rand = { version = "0.8.0", optional = true, default-features = false } rustc-hex = { version = "2.0.1", optional = true, default-features = false } static_assertions = "1.0.0" arbitrary = { version = "0.4", optional = true } 
@@ -24,6 +24,7 @@ arbitrary = { version = "0.4", optional = true } [dev-dependencies] rand_xorshift = "0.2.0" criterion = "0.3.0" +rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } [features] default = ["std", "rand", "rustc-hex", "byteorder"] diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index 33c1956b7..5a5f5d94d 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -244,23 +244,8 @@ mod rand { #[test] fn random() { - let default_seed = ::Seed::default(); - let mut rng = StdRng::from_seed(default_seed); - assert_eq!(H32::random_using(&mut rng), H32::from([0x76, 0xa0, 0x40, 0x53])); - } - - #[test] - fn randomize() { - let default_seed = ::Seed::default(); - let mut rng = StdRng::from_seed(default_seed); - assert_eq!( - { - let mut ret = H32::zero(); - ret.randomize_using(&mut rng); - ret - }, - H32::from([0x76, 0xa0, 0x40, 0x53]) - ) + let mut rng = StdRng::seed_from_u64(123); + assert_eq!(H32::random_using(&mut rng), H32::from([0xeb, 0x96, 0xaf, 0x1c])); } } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index e623914d9..77ba7aed0 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -28,7 +28,7 @@ alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } -rand = "0.7.2" +rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } sysinfo = "0.15.3" diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs index 3a14ac752..5a956020b 100644 --- a/kvdb-rocksdb/benches/bench_read_perf.rs +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -50,7 +50,7 @@ fn open_db() -> Database { /// an `ElasticArray128` so sometimes we save on allocations. 
fn n_random_bytes(n: usize) -> Vec { let mut rng = rand::thread_rng(); - let variability: i64 = rng.gen_range(0, (n / 5) as i64); + let variability: i64 = rng.gen_range(0..(n / 5) as i64); let plus_or_minus: i64 = if variability % 2 == 0 { 1 } else { -1 }; let range = Uniform::from(0..u8::max_value()); rng.sample_iter(&range).take((n as i64 + plus_or_minus * variability) as usize).collect() diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 6a2f1a1fb..828886648 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -22,7 +22,6 @@ ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-type hmac = "0.10.1" lazy_static = { version = "1.4.0", optional = true } pbkdf2 = "0.6.0" -rand = "0.7.3" ripemd160 = "0.9.1" rustc-hex = { version = "2.1.0", default-features = false, optional = true } scrypt = { version = "0.5.0" } diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs index 80a37a590..7f5692f1d 100644 --- a/parity-crypto/src/publickey/secret_key.rs +++ b/parity-crypto/src/publickey/secret_key.rs @@ -8,7 +8,6 @@ //! Secret key implementation. 
-use std::convert::TryFrom; use std::fmt; use std::ops::Deref; use std::str::FromStr; diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index fcd193618..fd9a4fbd6 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -10,7 +10,6 @@ edition = "2018" [dependencies] log = "0.4.8" smallvec = "1.6.0" -trace-time = { path = "../trace-time", version = "0.1" } [dev-dependencies] ethereum-types = { version = "0.10.0", path = "../ethereum-types" } diff --git a/uint/Cargo.toml b/uint/Cargo.toml index b27645ac6..ca650e72e 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -13,7 +13,7 @@ edition = "2018" byteorder = { version = "1.3.2", default-features = false } crunchy = { version = "0.2.2", default-features = false } qc = { package = "quickcheck", version = "0.9.0", optional = true } -rand = { version = "0.7.2", default-features = false, optional = true } +rand07 = { package = "rand", version = "0.7", default-features = false, optional = true } hex = { version = "0.4", default-features = false } static_assertions = "1.0.0" arbitrary = { version = "0.4", optional = true } @@ -21,7 +21,7 @@ arbitrary = { version = "0.4", optional = true } [features] default = ["std"] std = ["byteorder/std", "crunchy/std", "hex/std"] -quickcheck = ["qc", "rand"] +quickcheck = ["qc", "rand07"] [[example]] name = "modular" diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 0aedc11d8..83ab957a2 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -27,7 +27,7 @@ pub use qc; #[cfg(feature = "quickcheck")] #[doc(hidden)] -pub use rand; +pub use rand07; #[cfg(feature = "arbitrary")] #[doc(hidden)] diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 3d625368e..27bd0be96 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1742,8 +1742,8 @@ macro_rules! 
impl_quickcheck_arbitrary_for_uint { fn arbitrary(g: &mut G) -> Self { let mut res = [0u8; $n_bytes]; - use $crate::rand::Rng; - let p: f64 = $crate::rand::rngs::OsRng.gen(); + use $crate::rand07::Rng; + let p: f64 = $crate::rand07::rngs::OsRng.gen(); // make it more likely to generate smaller numbers that // don't use up the full $n_bytes let range = From c2fab006f2f1a534c5fd7a030012f94bc520652d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Jan 2021 15:46:50 +0000 Subject: [PATCH 196/359] build(deps): update rand_xorshift requirement from 0.2.0 to 0.3.0 (#491) Updates the requirements on [rand_xorshift](https://github.com/rust-random/rngs) to permit the latest version. - [Release notes](https://github.com/rust-random/rngs/releases) - [Commits](https://github.com/rust-random/rngs/commits/rand_xorshift-0.3.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- fixed-hash/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 3fa37cafc..5efd9977f 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -22,7 +22,7 @@ static_assertions = "1.0.0" arbitrary = { version = "0.4", optional = true } [dev-dependencies] -rand_xorshift = "0.2.0" +rand_xorshift = "0.3.0" criterion = "0.3.0" rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } From 6fc8030a6c5dd325115e582b3b18afbff161142a Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 5 Jan 2021 18:02:07 +0100 Subject: [PATCH 197/359] update changelogs and bump uint (#486) * update changelogs and bump uint * update ethereum-types changelog * update uint changelog * tabs * fixed-hash: bump to 0.7 * bump keccak-hash to 0.6.0 * contract-address: bump keccak-hash to 0.6 * update changelogs after publishing --- contract-address/CHANGELOG.md | 4 +++- contract-address/Cargo.toml | 2 +- 
ethbloom/CHANGELOG.md | 2 ++ ethbloom/Cargo.toml | 2 +- ethereum-types/CHANGELOG.md | 3 +++ ethereum-types/Cargo.toml | 4 ++-- fixed-hash/CHANGELOG.md | 4 ++++ fixed-hash/Cargo.toml | 2 +- keccak-hash/CHANGELOG.md | 2 ++ keccak-hash/Cargo.toml | 2 +- kvdb-memorydb/CHANGELOG.md | 2 ++ kvdb-rocksdb/CHANGELOG.md | 2 ++ kvdb-web/CHANGELOG.md | 2 ++ kvdb/CHANGELOG.md | 2 ++ parity-crypto/CHANGELOG.md | 3 +++ parity-util-mem/CHANGELOG.md | 3 ++- primitive-types/CHANGELOG.md | 5 +++++ primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/num-traits/CHANGELOG.md | 7 +++++++ primitive-types/impls/num-traits/Cargo.toml | 2 +- primitive-types/impls/rlp/CHANGELOG.md | 2 ++ primitive-types/impls/serde/Cargo.toml | 2 +- rlp/CHANGELOG.md | 2 ++ uint/CHANGELOG.md | 5 +++++ uint/Cargo.toml | 2 +- uint/src/uint.rs | 2 +- 26 files changed, 60 insertions(+), 14 deletions(-) create mode 100644 primitive-types/impls/num-traits/CHANGELOG.md diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md index 20714d132..44a70eb69 100644 --- a/contract-address/CHANGELOG.md +++ b/contract-address/CHANGELOG.md @@ -5,9 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.0] - 2021-01-05 ### Breaking - Updated `ethereum-types` to 0.10. [#463](https://github.com/paritytech/parity-common/pull/463) -## [0.9.0] - 2020-03-16 +## [0.4.0] - 2020-03-16 - License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - Updated dependencies. 
[#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 6754f6d63..81cd433d9 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" [dependencies] ethereum-types = { version = "0.10.0", path = "../ethereum-types" } rlp = { version = "0.5", path = "../rlp" } -keccak-hash = { version = "0.5", path = "../keccak-hash", default-features = false } +keccak-hash = { version = "0.6", path = "../keccak-hash", default-features = false } [features] default = [] diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 773f5785e..14cb8a7cb 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.10.0] - 2021-01-05 ### Breaking - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index c8223ecf3..c4a4600c9 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -12,7 +12,7 @@ edition = "2018" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } -fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 19795de50..86d01c3de 100644 --- a/ethereum-types/CHANGELOG.md +++ 
b/ethereum-types/CHANGELOG.md @@ -5,8 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.10.0] - 2021-01-05 ### Breaking - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) +- Updated `uint` to 0.9. [#486](https://github.com/paritytech/parity-common/pull/486) ## [0.9.2] - 2020-05-18 - Added `codec` feature. [#393](https://github.com/paritytech/parity-common/pull/393) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 544d6fa69..1f9ad0d00 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -9,8 +9,8 @@ edition = "2018" [dependencies] ethbloom = { path = "../ethbloom", version = "0.10", default-features = false } -fixed-hash = { path = "../fixed-hash", version = "0.6", default-features = false, features = ["byteorder", "rustc-hex"] } -uint-crate = { path = "../uint", package = "uint", version = "0.8", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } +uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } primitive-types = { path = "../primitive-types", version = "0.8", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index 6db7b6e76..b74c4b3e4 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2021-01-05 +### Breaking +- Updated `rand` to 0.8. [#488](https://github.com/paritytech/parity-common/pull/488) + ## [0.6.1] - 2020-04-27 - Added `arbitrary` feature. 
[#378](https://github.com/paritytech/parity-common/pull/378) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 5efd9977f..b240745df 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.6.1" +version = "0.7.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 77435e8fd..d702e2348 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2021-01-05 ### Breaking - Updated `primitive-types` to 0.8. [#463](https://github.com/paritytech/parity-common/pull/463) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 54e75b1aa..65ae2c4fd 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.5.1" +version = "0.6.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index a74210497..c837385bb 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.0] - 2021-01-05 ### Breaking - Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index a2f14fcd7..e90a74a84 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.10.0] - 2021-01-05 ### Breaking - Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index ba028010b..7a494b33e 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.0] - 2021-01-05 ### Breaking - Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 3577a4a82..99d1c52ea 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.0] - 2021-01-05 ### Breaking - Updated `parity-util-mem` to 0.8. [#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 2d8a372c0..33c190d27 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -5,9 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.7.0] - 2021-01-05 ### Breaking - Bump `rust-secp256k1` to v0.19, always allow zero signatures. [#438](https://github.com/paritytech/parity-common/pull/438) - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) +- Updated dependencies. 
[#483](https://github.com/paritytech/parity-common/pull/483) - Remove deprecated trait impls `FromStr`/`TryFrom` for `Secret` [#495](https://github.com/paritytech/parity-common/pull/495) ## [0.6.2] - 2020-06-19 diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 65d4b0860..dad32580d 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,8 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) +## [0.8.0] - 2021-01-05 +- Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) ### Breaking - Updated `ethereum-types` to 0.10. [#463](https://github.com/paritytech/parity-common/pull/463) - Updated `parking_lot` to 0.11.1. [#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index d78c908d9..23037abf1 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,7 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.0] - 2021-01-05 +- Added `num-traits` feature. [#480](https://github.com/paritytech/parity-common/pull/480) +### Breaking - Updated `impl-rlp` to `rlp` 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) +- Updated `uint` to 0.9. [#486](https://github.com/paritytech/parity-common/pull/486) ## [0.7.3] - 2020-11-12 - Added `scale_info` support. 
[#312](https://github.com/paritytech/parity-common/pull/312) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 4c691d812..6ffe31e66 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -8,8 +8,8 @@ description = "Primitive types shared by Ethereum and Substrate" edition = "2018" [dependencies] -fixed-hash = { version = "0.6", path = "../fixed-hash", default-features = false } -uint = { version = "0.8.3", path = "../uint", default-features = false } +fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } +uint = { version = "0.9.0", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md new file mode 100644 index 000000000..545cf7dff --- /dev/null +++ b/primitive-types/impls/num-traits/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [Unreleased] diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index 8e0fef9f3..a77774608 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -9,4 +9,4 @@ edition = "2018" [dependencies] num-traits = { version = "0.2", default-features = false } -uint = { version = "0.8.5", path = "../../../uint", default-features = false } +uint = { version = "0.9.0", path = "../../../uint", default-features = false } diff --git a/primitive-types/impls/rlp/CHANGELOG.md b/primitive-types/impls/rlp/CHANGELOG.md index 30dab8dfb..749b49f37 100644 --- a/primitive-types/impls/rlp/CHANGELOG.md +++ b/primitive-types/impls/rlp/CHANGELOG.md @@ -5,5 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.3.0] - 2021-01-05 ### Breaking - Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index a76c0e4d9..e75eeba43 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -18,7 +18,7 @@ serde = { version = "1.0.101", default-features = false, features = ["alloc"] } criterion = "0.3.0" serde_derive = "1.0.101" serde_json = "1.0.41" -uint = { version = "0.8.3", path = "../../../uint" } +uint = { version = "0.9.0", path = "../../../uint" } [[bench]] name = "impl_serde" diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 4f25e9b9e..afd84d95c 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.0] - 2021-01-05 ### Breaking - Use BytesMut for `RlpStream`'s backing buffer. 
[#453](https://github.com/paritytech/parity-common/pull/453) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index b856d52fc..3e3ddad3f 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2021-01-05 +- Allow `0x` prefix in `from_str`. [#487](https://github.com/paritytech/parity-common/pull/487) +### Breaking +- Optimized FromStr, made it no_std-compatible. [#468](https://github.com/paritytech/parity-common/pull/468) + ## [0.8.5] - 2020-08-12 - Make const matching work again. [#421](https://github.com/paritytech/parity-common/pull/421) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index ca650e72e..1781456b4 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.8.5" +version = "0.9.0" authors = ["Parity Technologies "] readme = "README.md" edition = "2018" diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 27bd0be96..702e59e86 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1688,7 +1688,7 @@ macro_rules! 
construct_uint { type Err = $crate::FromHexError; fn from_str(value: &str) -> $crate::core_::result::Result<$name, Self::Err> { - let value = value.strip_prefix("0x").unwrap_or(value); + let value = value.strip_prefix("0x").unwrap_or(value); const BYTES_LEN: usize = $n_words * 8; const MAX_ENCODED_LEN: usize = BYTES_LEN * 2; From fd69e629bc9a59fe4353365034d1f19152f93c7e Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Wed, 6 Jan 2021 21:12:55 +1300 Subject: [PATCH 198/359] bump fs-swap (#498) --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 77ba7aed0..beeed32b3 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -13,7 +13,7 @@ harness = false [dependencies] smallvec = "1.0.0" -fs-swap = "0.2.4" +fs-swap = "0.2.5" kvdb = { path = "../kvdb", version = "0.8" } log = "0.4.8" num_cpus = "1.10.1" From e1dfdceeca11efb476f75ae2739e9f8dd0634e58 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 8 Jan 2021 16:21:58 +0100 Subject: [PATCH 199/359] triehash: patch release (#499) --- triehash/CHANGELOG.md | 2 ++ triehash/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md index b5f0357b1..2f7d72d64 100644 --- a/triehash/CHANGELOG.md +++ b/triehash/CHANGELOG.md @@ -6,6 +6,8 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.8.4] - 2020-01-08 +- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) ## [0.8.3] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. 
[#342](https://github.com/paritytech/parity-common/pull/342) ## [0.8.2] - 2019-12-15 diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index acceca2f5..5ed55b529 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "triehash" -version = "0.8.3" +version = "0.8.4" authors = ["Parity Technologies "] description = "In-memory patricia trie operations" repository = "https://github.com/paritytech/parity-common" From 24893acd0e9446c0c39cbe6db1f656ae37243ca9 Mon Sep 17 00:00:00 2001 From: honeywest <50997103+honeywest@users.noreply.github.com> Date: Mon, 18 Jan 2021 15:56:35 +0800 Subject: [PATCH 200/359] fix clippy warning (#504) --- fixed-hash/src/hash.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index c84d6bb35..dd80df6d8 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -24,9 +24,9 @@ /// ``` /// use fixed_hash::construct_fixed_hash; /// construct_fixed_hash!{ -/// /// My unformatted 160 bytes sized hash type. -/// #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] -/// pub struct H160(20); +/// /// My unformatted 160 bytes sized hash type. 
+/// #[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] +/// pub struct H160(20); /// } /// assert_eq!(std::mem::size_of::(), 20); /// ``` From 1d3de9e42b7bee0bd9b84f924b3e9d02beaeaa0c Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 20 Jan 2021 09:59:08 +0100 Subject: [PATCH 201/359] add a test for #507 (#508) * add a test for #507 * CI: test uint on a big-endian platform * a workaround for gmp * grumbles * bump byteorder to 1.4.2 --- .travis.yml | 1 + fixed-hash/Cargo.toml | 2 +- uint/Cargo.toml | 4 ++-- uint/tests/uint_tests.rs | 9 +++++++++ 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5950b71dc..4ed37599a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,7 @@ matrix: - cargo install cross script: - cross test --target=aarch64-linux-android -p parity-util-mem + - cross test --target=mips64-unknown-linux-gnuabi64 -p uint - os: osx osx_image: xcode11.3 addons: diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index b240745df..23c6c5079 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -14,7 +14,7 @@ edition = "2018" features = ["quickcheck", "api-dummy"] [dependencies] -byteorder = { version = "1.3.2", optional = true, default-features = false } +byteorder = { version = "1.4.2", optional = true, default-features = false } quickcheck = { version = "0.9.0", optional = true } rand = { version = "0.8.0", optional = true, default-features = false } rustc-hex = { version = "2.0.1", optional = true, default-features = false } diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 1781456b4..2e731c06c 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -10,7 +10,7 @@ readme = "README.md" edition = "2018" [dependencies] -byteorder = { version = "1.3.2", default-features = false } +byteorder = { version = "1.4.2", default-features = false } crunchy = { version = "0.2.2", default-features = false } qc = { package = "quickcheck", version = "0.9.0", optional = true } 
rand07 = { package = "rand", version = "0.7", default-features = false, optional = true } @@ -34,7 +34,7 @@ required-features = ["std"] criterion = "0.3.0" num-bigint = "0.3.1" -[target.'cfg(unix)'.dev-dependencies] +[target.'cfg(all(unix, target_arch = "x86_64"))'.dev-dependencies] rug = { version = "1.6.0", default-features = false, features = ["integer"] } [[bench]] diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index e9f441ac9..318253279 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1018,6 +1018,15 @@ fn leading_zeros() { assert_eq!(U256::from("0000000000000000000000000000000000000000000000000000000000000000").leading_zeros(), 256); } +#[test] +fn issue_507_roundtrip() { + let mut b32 = <[u8; 32]>::default(); + let a = U256::from(10); + a.to_little_endian(&mut b32); + let b = U256::from_little_endian(&b32[..]); + assert_eq!(a, b); +} + #[test] fn trailing_zeros() { assert_eq!(U256::from("1adbdd6bd6ff027485484b97f8a6a4c7129756dd100000000000000000000000").trailing_zeros(), 92); From f627a422b1ccd5d46b7f92e46d2617c0461e3270 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 20 Jan 2021 11:13:35 +0100 Subject: [PATCH 202/359] ethereum-types: fix wasm builds for serialize feature (#503) * ethbloom: do not pull std for 'serialize' feature * ethereum-types: do not pull std for 'serialize' feature * CI: check wasm builds for ethbloom and ethereum-types * fix wasm target * CI: remove redundant check * CI: fix wasm target install * update changelogs --- .travis.yml | 2 ++ ethbloom/CHANGELOG.md | 2 ++ ethbloom/Cargo.toml | 2 +- ethereum-types/CHANGELOG.md | 2 ++ ethereum-types/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4ed37599a..f3d685bb4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,6 +27,7 @@ matrix: allow_failures: - rust: nightly install: + - rustup target add wasm32-unknown-unknown - curl -o- 
https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | sh - source ~/.nvm/nvm.sh - nvm install --lts @@ -43,6 +44,7 @@ script: cd contract-address/ && cargo test --features=external_doc && cd ..; fi - cd ethbloom/ && cargo test --no-default-features --features="rustc-hex" && cargo check --benches && cd .. + - cd ethereum-types/ && cargo build --no-default-features --features="serialize,rlp" --target=wasm32-unknown-unknown && cd .. - cd fixed-hash/ && cargo test --all-features && cargo test --no-default-features --features="byteorder,rustc-hex" && cd .. - cd uint/ && cargo test --all-features && cargo test --no-default-features && cd .. - cd keccak-hash/ && cargo test --no-default-features && cd .. diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 14cb8a7cb..94a75dcb1 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Potentially-breaking +- `serialize` feature no longer pulls `std`. [#503](https://github.com/paritytech/parity-common/pull/503) ## [0.10.0] - 2021-01-05 ### Breaking diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index c4a4600c9..88777f43b 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -25,7 +25,7 @@ hex-literal = "0.3.1" [features] default = ["std", "rlp", "serialize", "rustc-hex"] std = ["fixed-hash/std", "crunchy/std"] -serialize = ["std", "impl-serde"] +serialize = ["impl-serde"] rustc-hex = ["fixed-hash/rustc-hex"] arbitrary = ["fixed-hash/arbitrary"] rlp = ["impl-rlp"] diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 86d01c3de..e2b349a50 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Potentially-breaking +- `serialize` feature no longer pulls `std`. 
[#503](https://github.com/paritytech/parity-common/pull/503) ## [0.10.0] - 2021-01-05 ### Breaking diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 1f9ad0d00..c7eb4b449 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -22,7 +22,7 @@ serde_json = "1.0.41" [features] default = ["std", "rlp", "serialize"] std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] -serialize = ["std", "impl-serde", "primitive-types/serde", "ethbloom/serialize"] +serialize = ["impl-serde", "primitive-types/serde_no_std", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] rlp = ["impl-rlp", "ethbloom/rlp", "primitive-types/rlp"] codec = ["impl-codec", "ethbloom/codec"] From 74c7b6d383ff58362d252a86f7a3a26dbf61408c Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 27 Jan 2021 12:54:45 +0100 Subject: [PATCH 203/359] remove parity-runtime (#511) --- Cargo.toml | 1 - runtime/CHANGELOG.md | 15 --- runtime/Cargo.toml | 21 ---- runtime/README.md | 6 -- runtime/examples/simple.rs | 34 ------ runtime/src/lib.rs | 215 ------------------------------------- 6 files changed, 292 deletions(-) delete mode 100644 runtime/CHANGELOG.md delete mode 100644 runtime/Cargo.toml delete mode 100644 runtime/README.md delete mode 100644 runtime/examples/simple.rs delete mode 100644 runtime/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 019b989de..c3380e0da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,6 @@ members = [ "plain_hasher", "rlp", "rlp-derive", - "runtime", "transaction-pool", "trace-time", "triehash", diff --git a/runtime/CHANGELOG.md b/runtime/CHANGELOG.md deleted file mode 100644 index 7c02a7690..000000000 --- a/runtime/CHANGELOG.md +++ /dev/null @@ -1,15 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. 
- -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [0.1.2] - 2020-07-16 -### Changed -- Port runtime to tokio-compat. [#403](https://github.com/paritytech/parity-common/pull/403) - -## [0.1.1] - 2020-02-11 -### Changed -- Moved to parity common repo, prepared for publishing. [#271](https://github.com/paritytech/parity-common/pull/271) diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml deleted file mode 100644 index 2ab8c8629..000000000 --- a/runtime/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "parity-runtime" -version = "0.1.2" -authors = ["Parity Technologies "] -edition = "2018" - -description = "Tokio runtime wrapper" -license = "GPL-3.0" -readme = "README.md" -homepage = "https://www.parity.io/" -keywords = ["parity", "runtime", "tokio"] -include = ["Cargo.toml", "src/**/*.rs", "README.md", "CHANGELOG.md"] - -[dependencies] -futures = { version = "0.3", default-features = false, features = ["compat"] } -futures01 = { package = "futures", version = "0.1" } -tokio-compat = "0.1" -tokio = { version = "0.2", features = ["full"] } - -[features] -test-helpers = [] diff --git a/runtime/README.md b/runtime/README.md deleted file mode 100644 index 7cda2a31f..000000000 --- a/runtime/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# parity-runtime - -Wrapper over tokio runtime. Provides: -- Customizable runtime with ability to spawn it in different thread models -- Corresponding runtime executor for tasks -- Runtime handle diff --git a/runtime/examples/simple.rs b/runtime/examples/simple.rs deleted file mode 100644 index c037f74b7..000000000 --- a/runtime/examples/simple.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. 
- -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . - -//! Simple example, illustating usage of runtime wrapper. - -use parity_runtime::Runtime; -use std::{thread::park_timeout, time::Duration}; -use tokio::{fs::read_dir, stream::*}; - -/// Read current directory in a future, which is executed in the created runtime -fn main() { - let runtime = Runtime::with_default_thread_count(); - runtime.executor().spawn_std(async move { - let mut dirs = read_dir(".").await.unwrap(); - while let Some(dir) = dirs.try_next().await.expect("Error") { - println!("{:?}", dir.path()); - } - }); - let timeout = Duration::from_secs(3); - park_timeout(timeout); -} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs deleted file mode 100644 index 9284454be..000000000 --- a/runtime/src/lib.rs +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Ethereum. - -// Parity Ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Ethereum. If not, see . - -//! Tokio Runtime wrapper. - -use futures::compat::*; -use futures01::{Future as Future01, IntoFuture as IntoFuture01}; -use std::{fmt, future::Future, thread}; -pub use tokio_compat::runtime::{Builder as TokioRuntimeBuilder, Runtime as TokioRuntime, TaskExecutor}; - -/// Runtime for futures. -/// -/// Runs in a separate thread. -pub struct Runtime { - executor: Executor, - handle: RuntimeHandle, -} - -const RUNTIME_BUILD_PROOF: &str = - "Building a Tokio runtime will only fail when mio components cannot be initialized (catastrophic)"; - -impl Runtime { - fn new(runtime_bldr: &mut TokioRuntimeBuilder) -> Self { - let mut runtime = runtime_bldr.build().expect(RUNTIME_BUILD_PROOF); - - let (stop, stopped) = tokio::sync::oneshot::channel(); - let (tx, rx) = std::sync::mpsc::channel(); - let handle = thread::spawn(move || { - let executor = runtime.executor(); - runtime.block_on_std(async move { - tx.send(executor).expect("Rx is blocking upper thread."); - let _ = stopped.await; - }); - }); - let executor = rx.recv().expect("tx is transfered to a newly spawned thread."); - - Runtime { - executor: Executor { inner: Mode::Tokio(executor) }, - handle: RuntimeHandle { close: Some(stop), handle: Some(handle) }, - } - } - - /// Spawns a new tokio runtime with a default thread count on a background - /// thread and returns a `Runtime` which can be used to spawn tasks via - /// its executor. - pub fn with_default_thread_count() -> Self { - let mut runtime_bldr = TokioRuntimeBuilder::new(); - Self::new(&mut runtime_bldr) - } - - /// Spawns a new tokio runtime with a the specified thread count on a - /// background thread and returns a `Runtime` which can be used to spawn - /// tasks via its executor. 
- #[cfg(any(test, feature = "test-helpers"))] - pub fn with_thread_count(thread_count: usize) -> Self { - let mut runtime_bldr = TokioRuntimeBuilder::new(); - runtime_bldr.core_threads(thread_count); - - Self::new(&mut runtime_bldr) - } - - /// Returns this runtime raw executor. - #[cfg(any(test, feature = "test-helpers"))] - pub fn raw_executor(&self) -> TaskExecutor { - if let Mode::Tokio(ref executor) = self.executor.inner { - executor.clone() - } else { - panic!("Runtime is not initialized in Tokio mode.") - } - } - - /// Returns runtime executor. - pub fn executor(&self) -> Executor { - self.executor.clone() - } -} - -#[derive(Clone)] -enum Mode { - Tokio(TaskExecutor), - // Mode used in tests - #[allow(dead_code)] - Sync, - // Mode used in tests - #[allow(dead_code)] - ThreadPerFuture, -} - -impl fmt::Debug for Mode { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::Mode::*; - - match *self { - Tokio(_) => write!(fmt, "tokio"), - Sync => write!(fmt, "synchronous"), - ThreadPerFuture => write!(fmt, "thread per future"), - } - } -} - -fn block_on + Send + 'static>(r: F) { - tokio::runtime::Builder::new().enable_all().basic_scheduler().build().expect(RUNTIME_BUILD_PROOF).block_on(r) -} - -#[derive(Debug, Clone)] -pub struct Executor { - inner: Mode, -} - -impl Executor { - /// Synchronous executor, used for tests. - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_sync() -> Self { - Executor { inner: Mode::Sync } - } - - /// Spawns a new thread for each future (use only for tests). 
- #[cfg(any(test, feature = "test-helpers"))] - pub fn new_thread_per_future() -> Self { - Executor { inner: Mode::ThreadPerFuture } - } - - /// Spawn a legacy future on this runtime - pub fn spawn(&self, r: R) - where - R: IntoFuture01 + Send + 'static, - R::Future: Send + 'static, - { - self.spawn_std(async move { - let _ = r.into_future().compat().await; - }) - } - - /// Spawn an std future on this runtime - pub fn spawn_std(&self, r: R) - where - R: Future + Send + 'static, - { - match &self.inner { - Mode::Tokio(executor) => { - let _ = executor.spawn_handle_std(r); - } - Mode::Sync => block_on(r), - Mode::ThreadPerFuture => { - thread::spawn(move || block_on(r)); - } - } - } -} - -impl + Send + 'static> futures01::future::Executor for Executor { - fn execute(&self, future: F) -> Result<(), futures01::future::ExecuteError> { - match &self.inner { - Mode::Tokio(executor) => executor.execute(future), - Mode::Sync => { - block_on(async move { - let _ = future.compat().await; - }); - Ok(()) - } - Mode::ThreadPerFuture => { - thread::spawn(move || { - block_on(async move { - let _ = future.compat().await; - }) - }); - Ok(()) - } - } - } -} - -/// A handle to a runtime. Dropping the handle will cause runtime to shutdown. -pub struct RuntimeHandle { - close: Option>, - handle: Option>, -} - -impl From for RuntimeHandle { - fn from(el: Runtime) -> Self { - el.handle - } -} - -impl Drop for RuntimeHandle { - fn drop(&mut self) { - self.close.take().map(|v| v.send(())); - } -} - -impl RuntimeHandle { - /// Blocks current thread and waits until the runtime is finished. - pub fn wait(mut self) -> thread::Result<()> { - self.handle.take().expect("Handle is taken only in `wait`, `wait` is consuming; qed").join() - } - - /// Finishes this runtime. - pub fn close(mut self) { - let _ = - self.close.take().expect("Close is taken only in `close` and `drop`. 
`close` is consuming; qed").send(()); - } -} From bb7b4376acea4f4cbc8e356d9b8e23de8502b293 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Wed, 27 Jan 2021 17:45:04 +0100 Subject: [PATCH 204/359] Update codec and crates depending (#510) Co-authored-by: Andronik Ordian --- contract-address/CHANGELOG.md | 5 +++++ contract-address/Cargo.toml | 6 +++--- ethbloom/CHANGELOG.md | 5 +++++ ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 7 +++++++ ethereum-types/Cargo.toml | 8 ++++---- keccak-hash/CHANGELOG.md | 4 ++++ keccak-hash/Cargo.toml | 4 ++-- kvdb-memorydb/CHANGELOG.md | 5 +++++ kvdb-memorydb/Cargo.toml | 8 ++++---- kvdb-rocksdb/CHANGELOG.md | 5 +++++ kvdb-rocksdb/Cargo.toml | 8 ++++---- kvdb-shared-tests/CHANGELOG.md | 4 ++++ kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb-web/CHANGELOG.md | 6 ++++++ kvdb-web/Cargo.toml | 10 +++++----- kvdb/CHANGELOG.md | 4 ++++ kvdb/Cargo.toml | 4 ++-- parity-crypto/CHANGELOG.md | 4 ++++ parity-crypto/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 5 +++++ parity-util-mem/Cargo.toml | 6 +++--- primitive-types/CHANGELOG.md | 5 +++++ primitive-types/Cargo.toml | 6 +++--- primitive-types/impls/codec/CHANGELOG.md | 6 +++++- primitive-types/impls/codec/Cargo.toml | 4 ++-- primitive-types/tests/scale_info.rs | 10 ++++++---- rlp/Cargo.toml | 2 +- transaction-pool/Cargo.toml | 2 +- triehash/Cargo.toml | 2 +- 30 files changed, 111 insertions(+), 46 deletions(-) diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md index 44a70eb69..583ee8569 100644 --- a/contract-address/CHANGELOG.md +++ b/contract-address/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.6.0] - 2021-01-27 +### Breaking +- Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `keccak-hash` to 0.7. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.5.0] - 2021-01-05 ### Breaking - Updated `ethereum-types` to 0.10. 
[#463](https://github.com/paritytech/parity-common/pull/463) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 81cd433d9..993ffe26c 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "contract-address" -version = "0.5.0" +version = "0.6.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,9 +11,9 @@ edition = "2018" readme = "README.md" [dependencies] -ethereum-types = { version = "0.10.0", path = "../ethereum-types" } +ethereum-types = { version = "0.11.0", path = "../ethereum-types" } rlp = { version = "0.5", path = "../rlp" } -keccak-hash = { version = "0.6", path = "../keccak-hash", default-features = false } +keccak-hash = { version = "0.7", path = "../keccak-hash", default-features = false } [features] default = [] diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 94a75dcb1..45781b2e4 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,6 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.11.0] - 2021-01-27 +### Breaking +- Updated `impl-codec` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) + ### Potentially-breaking - `serialize` feature no longer pulls `std`. 
[#503](https://github.com/paritytech/parity-common/pull/503) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 88777f43b..61a759bad 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.10.0" +version = "0.11.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -15,7 +15,7 @@ crunchy = { version = "0.2.2", default-features = false, features = ["limit_256" fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } -impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] criterion = "0.3.0" diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index e2b349a50..cfc8d223b 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,6 +5,13 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.11.0] - 2021-01-27 +### Breaking +- Updated `ethbloom` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `primitive-types` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `impl-codec` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) + ### Potentially-breaking - `serialize` feature no longer pulls `std`. 
[#503](https://github.com/paritytech/parity-common/pull/503) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index c7eb4b449..dfe86dc75 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.10.0" +version = "0.11.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,13 +8,13 @@ description = "Ethereum types" edition = "2018" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.10", default-features = false } +ethbloom = { path = "../ethbloom", version = "0.11", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.8", features = ["byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.9", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } -impl-codec = { version = "0.4.1", path = "../primitive-types/impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } [dev-dependencies] serde_json = "1.0.41" diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index d702e2348..ce980bbdf 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2021-01-27 +### Breaking +- Updated `primitive-types` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.6.0] - 2021-01-05 ### Breaking - Updated `primitive-types` to 0.8. [#463](https://github.com/paritytech/parity-common/pull/463) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 65ae2c4fd..6f1f29ee1 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.6.0" +version = "0.7.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ edition = "2018" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.8", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.9", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index c837385bb..a5d9fa532 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.8.0] - 2021-01-05 ### Breaking - Updated dependencies. 
[#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 7cf540bd9..9599421f5 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,12 +8,12 @@ license = "MIT OR Apache-2.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false, features = ["std"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false, features = ["std"] } parking_lot = "0.11.1" -kvdb = { version = "0.8", path = "../kvdb" } +kvdb = { version = "0.9", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } [features] default = [] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index e90a74a84..f55d08a57 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.11.0] - 2021-01-27 +### Breaking +- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.10.0] - 2021-01-05 ### Breaking - Updated dependencies. 
[#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index beeed32b3..9d9b038d5 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.10.0" +version = "0.11.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,20 +14,20 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.5" -kvdb = { path = "../kvdb", version = "0.8" } +kvdb = { path = "../kvdb", version = "0.9" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.11.1" regex = "1.3.1" rocksdb = { version = "0.15", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md index 545cf7dff..f307aa6c3 100644 --- a/kvdb-shared-tests/CHANGELOG.md +++ b/kvdb-shared-tests/CHANGELOG.md @@ -5,3 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.7.0] - 2021-01-27 +### Breaking +- Updated `kvdb` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 14693f52d..4ea00030e 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "kvdb-shared-tests" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.8" } +kvdb = { path = "../kvdb", version = "0.9" } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 7a494b33e..323f42614 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -6,6 +6,12 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `kvdb-memorydb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.8.0] - 2021-01-05 ### Breaking - Updated dependencies. 
[#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index 7790262f4..fe04d8c18 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.69" js-sys = "0.3.46" -kvdb = { version = "0.8", path = "../kvdb" } -kvdb-memorydb = { version = "0.8", path = "../kvdb-memorydb" } +kvdb = { version = "0.9", path = "../kvdb" } +kvdb-memorydb = { version = "0.9", path = "../kvdb-memorydb" } futures = "0.3.8" log = "0.4.11" send_wrapper = "0.5.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false } # TODO: https://github.com/paritytech/parity-common/issues/479 # This is hack to enable `wasm-bindgen` feature of `parking_lot` in other dependencies. # Thus, it's not direct dependency and do not remove until a proper fix exists. @@ -44,6 +44,6 @@ features = [ [dev-dependencies] console_log = "0.2.0" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.6" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } wasm-bindgen-test = "0.3.19" wasm-bindgen-futures = "0.4.19" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 99d1c52ea..9c2c444d0 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.8.0] - 2021-01-05 ### Breaking - Updated `parity-util-mem` to 0.8. 
[#470](https://github.com/paritytech/parity-common/pull/470) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 4cf0c7657..8f22d0780 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -9,4 +9,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.8", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false } diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index 33c190d27..dfa71058d 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.8.0] - 2021-01-27 +### Breaking +- Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.7.0] - 2021-01-05 ### Breaking - Bump `rust-secp256k1` to v0.19, always allow zero signatures. [#438](https://github.com/paritytech/parity-common/pull/438) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 828886648..1f7163425 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." 
@@ -18,7 +18,7 @@ aes = "0.6.0" aes-ctr = "0.6.0" block-modes = "0.7.0" digest = "0.9.0" -ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.11.0", optional = true, path = "../ethereum-types" } hmac = "0.10.1" lazy_static = { version = "1.4.0", optional = true } pbkdf2 = "0.6.0" diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index dad32580d..0c88476ca 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `primitive-types` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.8.0] - 2021-01-05 - Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) ### Breaking diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 6ab5f3e54..e88ba2eed 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -26,9 +26,9 @@ parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.2.0" smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.10.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.11.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.11.1", optional = true } -primitive-types = { version = "0.8", path = "../primitive-types", default-features = false, optional = true } +primitive-types = { version = "0.9", path = "../primitive-types", default-features = false, optional = true } 
[target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 23037abf1..df097bc61 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.0] - 2021-01-27 +### Breaking +- Updated `impl-codec` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) +- Updated `scale-info` to 0.5. [#510](https://github.com/paritytech/parity-common/pull/510) + ## [0.8.0] - 2021-01-05 - Added `num-traits` feature. [#480](https://github.com/paritytech/parity-common/pull/480) ### Breaking diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 6ffe31e66..9646b344f 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,10 +11,10 @@ edition = "2018" fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } uint = { version = "0.9.0", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } -impl-codec = { version = "0.4.1", path = "impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.5.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } -scale-info = { version = "0.4", features = ["derive"], default-features = false, optional = true } +scale-info = { version = "0.5", features = ["derive"], default-features = false, optional = true } 
[features] default = ["std"] diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md index 927c9dc9c..179be164f 100644 --- a/primitive-types/impls/codec/CHANGELOG.md +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -1,7 +1,11 @@ # Changelog -The format is based on [Keep a Changelog]. +The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.0] - 2021-01-27 +### Breaking +- Updated `parity-scale-codec` to 2.0. [#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index df837fd01..27a4aa7b6 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-codec" -version = "0.4.2" +version = "0.5.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,7 +8,7 @@ description = "Parity Codec serialization support for uint and fixed hash." 
edition = "2018" [dependencies] -parity-scale-codec = { version = "1.0.6", default-features = false } +parity-scale-codec = { version = "2.0.0", default-features = false } [features] default = ["std"] diff --git a/primitive-types/tests/scale_info.rs b/primitive-types/tests/scale_info.rs index 5a61ef133..1acecf54e 100644 --- a/primitive-types/tests/scale_info.rs +++ b/primitive-types/tests/scale_info.rs @@ -13,16 +13,18 @@ use scale_info::{build::Fields, Path, Type, TypeInfo}; #[test] fn u256_scale_info() { - let r#type = - Type::builder().path(Path::new("U256", "primitive_types")).composite(Fields::unnamed().field_of::<[u64; 4]>()); + let r#type = Type::builder() + .path(Path::new("U256", "primitive_types")) + .composite(Fields::unnamed().field_of::<[u64; 4]>("[u64; 4]")); assert_eq!(U256::type_info(), r#type.into()); } #[test] fn h256_scale_info() { - let r#type = - Type::builder().path(Path::new("H256", "primitive_types")).composite(Fields::unnamed().field_of::<[u8; 32]>()); + let r#type = Type::builder() + .path(Path::new("H256", "primitive_types")) + .composite(Fields::unnamed().field_of::<[u8; 32]>("[u8; 32]")); assert_eq!(H256::type_info(), r#type.into()); } diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 6574b3ba2..fa1070ec1 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -14,7 +14,7 @@ rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] criterion = "0.3.0" hex-literal = "0.3.1" -primitive-types = { path = "../primitive-types", version = "0.8", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.9", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index fd9a4fbd6..bb3cb6ad3 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -12,4 +12,4 @@ log = "0.4.8" smallvec = "1.6.0" [dev-dependencies] -ethereum-types = { version = "0.10.0", path = "../ethereum-types" } +ethereum-types = { version = 
"0.11.0", path = "../ethereum-types" } diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 5ed55b529..0825f7b7e 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -14,7 +14,7 @@ rlp = { version = "0.5", path = "../rlp", default-features = false } [dev-dependencies] criterion = "0.3.0" keccak-hasher = "0.15.2" -ethereum-types = { version = "0.10.0", path = "../ethereum-types" } +ethereum-types = { version = "0.11.0", path = "../ethereum-types" } tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" hex-literal = "0.3.1" From 3ad905d35ed5009547747ae9455f949a458123f2 Mon Sep 17 00:00:00 2001 From: Artem Pikulin Date: Mon, 8 Feb 2021 21:50:07 +0700 Subject: [PATCH 205/359] parity-util-mem: use ios compilation conditions same as macos. (#522) * parity-util-mem: use ios compilation conditions same as macos. * Fix fmt. --- parity-util-mem/src/lib.rs | 5 ++++- parity-util-mem/src/malloc_size.rs | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index c1add51a8..bf6334e0e 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -46,7 +46,10 @@ cfg_if::cfg_if! { pub mod allocators; -#[cfg(any(all(target_os = "macos", not(feature = "jemalloc-global"),), feature = "estimate-heapsize"))] +#[cfg(any( + all(any(target_os = "macos", target_os = "ios"), not(feature = "jemalloc-global"),), + feature = "estimate-heapsize" +))] pub mod sizeof; /// This is a copy of patched crate `malloc_size_of` as a module. 
diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 476ea9d66..2e8a73eb7 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -216,7 +216,10 @@ pub trait MallocConditionalShallowSizeOf { fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; } -#[cfg(not(any(all(target_os = "macos", not(feature = "jemalloc-global"),), feature = "estimate-heapsize")))] +#[cfg(not(any( + all(any(target_os = "macos", target_os = "ios"), not(feature = "jemalloc-global"),), + feature = "estimate-heapsize" +)))] pub mod inner_allocator_use { use super::*; From 98d62c18438ff2c1d50bb970f25b04475122c9d9 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Tue, 16 Feb 2021 10:20:41 +0100 Subject: [PATCH 206/359] CI: add a workaround for build issue (#527) --- .travis.yml | 3 +++ appveyor.yml | 1 + 2 files changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index f3d685bb4..08e4240cf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,12 +11,14 @@ matrix: - os: linux rust: nightly script: + - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - cargo check --workspace --benches - os: linux rust: stable install: - cargo install cross script: + - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - cross test --target=aarch64-linux-android -p parity-util-mem - cross test --target=mips64-unknown-linux-gnuabi64 -p uint - os: osx @@ -35,6 +37,7 @@ install: - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - which geckodriver script: + - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - if [ "$TRAVIS_OS_NAME" == "linux" ]; then cargo fmt -- --check; fi diff --git a/appveyor.yml b/appveyor.yml index 2807de0d7..6a0725a92 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -18,6 +18,7 @@ install: - cargo -vV build_script: + - cargo generate-lockfile --verbose && cargo 
update -p funty --precise "1.1.0" --verbose - cargo check --tests - cargo build --all From 6e807e87593ae16acf6b2e7a63c8aee0bab7c67c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Feb 2021 13:33:41 +0100 Subject: [PATCH 207/359] build(deps): update sysinfo requirement from 0.15.3 to 0.16.3 (#526) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. - [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 9d9b038d5..0481f42df 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -31,6 +31,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.15.3" +sysinfo = "0.16.3" ctrlc = "3.1.4" chrono = "0.4" From db7c985b6dfe11cb7ef44072cbcaeb7022ae2ca4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Feb 2021 14:44:13 +0100 Subject: [PATCH 208/359] build(deps): update scale-info requirement from 0.5 to 0.6 (#519) * build(deps): update scale-info requirement from 0.5 to 0.6 Updates the requirements on [scale-info](https://github.com/paritytech/scale-info) to permit the latest version. 
- [Release notes](https://github.com/paritytech/scale-info/releases) - [Changelog](https://github.com/paritytech/scale-info/blob/master/CHANGELOG.md) - [Commits](https://github.com/paritytech/scale-info/commits/v0.6.0) Signed-off-by: dependabot[bot] * make it compile * some comments never hurt * organize features properly, so that scale-info works with no_std * rename to scale-info-crate Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andronik Ordian --- .travis.yml | 1 + primitive-types/Cargo.toml | 6 +++++- primitive-types/src/lib.rs | 2 +- primitive-types/tests/scale_info.rs | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 08e4240cf..86b226190 100644 --- a/.travis.yml +++ b/.travis.yml @@ -60,6 +60,7 @@ script: - cd parity-util-mem/ && cargo test --no-default-features --features=dlmalloc-global && cd .. - cd primitive-types/ && cargo test --all-features && cd .. - cd primitive-types/ && cargo test --no-default-features --features=serde_no_std && cd .. + - cd primitive-types/ && cargo test --no-default-features --features=scale-info && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. - cd kvdb-web/ && wasm-pack test --headless --firefox && cd .. 
diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 9646b344f..426920aa1 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -14,7 +14,10 @@ impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false impl-codec = { version = "0.5.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } -scale-info = { version = "0.5", features = ["derive"], default-features = false, optional = true } +scale-info-crate = { package = "scale-info", version = "0.6", features = ["derive"], default-features = false, optional = true } +# we add parity-scale-codec here directly only because scale-info requires us to do so +# see https://github.com/paritytech/scale-info/pull/61#discussion_r568760753 +parity-scale-codec = { version = "2.0", default-features = false, optional = true } [features] default = ["std"] @@ -24,6 +27,7 @@ rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde", "impl-serde/std"] serde_no_std = ["impl-serde"] codec = ["impl-codec"] +scale-info = ["codec", "scale-info-crate", "parity-scale-codec"] rlp = ["impl-rlp"] arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] fp-conversion = ["std"] diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index 696ac6898..ab3248f60 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -20,7 +20,7 @@ mod fp_conversion; use core::convert::TryFrom; use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; #[cfg(feature = "scale-info")] -use scale_info::TypeInfo; +use scale_info_crate::TypeInfo; use uint::{construct_uint, uint_full_mul_reg}; /// Error type for conversion. 
diff --git a/primitive-types/tests/scale_info.rs b/primitive-types/tests/scale_info.rs index 1acecf54e..3774c8448 100644 --- a/primitive-types/tests/scale_info.rs +++ b/primitive-types/tests/scale_info.rs @@ -9,7 +9,7 @@ //! Tests for scale-info feature of primitive-types. use primitive_types::{H256, U256}; -use scale_info::{build::Fields, Path, Type, TypeInfo}; +use scale_info_crate::{build::Fields, Path, Type, TypeInfo}; #[test] fn u256_scale_info() { From 0a7fe8131b95bdf11772ec18858bb1e8e32fd674 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Mar 2021 10:50:51 +0100 Subject: [PATCH 209/359] build(deps): update arbitrary requirement from 0.4 to 1.0 (#530) * build(deps): update arbitrary requirement from 0.4 to 1.0 Updates the requirements on [arbitrary](https://github.com/rust-fuzz/arbitrary) to permit the latest version. - [Release notes](https://github.com/rust-fuzz/arbitrary/releases) - [Changelog](https://github.com/rust-fuzz/arbitrary/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-fuzz/arbitrary/compare/0.4.0...1.0.0) Signed-off-by: dependabot[bot] * Add lifetime Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: David Palm --- fixed-hash/Cargo.toml | 2 +- fixed-hash/src/hash.rs | 2 +- uint/Cargo.toml | 2 +- uint/src/uint.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 23c6c5079..99419c582 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -19,7 +19,7 @@ quickcheck = { version = "0.9.0", optional = true } rand = { version = "0.8.0", optional = true, default-features = false } rustc-hex = { version = "2.0.1", optional = true, default-features = false } static_assertions = "1.0.0" -arbitrary = { version = "0.4", optional = true } +arbitrary = { version = "1.0", optional = true } [dev-dependencies] rand_xorshift = "0.3.0" diff --git 
a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index dd80df6d8..bc64993b5 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -665,7 +665,7 @@ macro_rules! impl_arbitrary_for_fixed_hash { #[doc(hidden)] macro_rules! impl_arbitrary_for_fixed_hash { ( $name:ident ) => { - impl $crate::arbitrary::Arbitrary for $name { + impl $crate::arbitrary::Arbitrary<'_> for $name { fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { let mut res = Self::zero(); u.fill_buffer(&mut res.0)?; diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 2e731c06c..d5d009ffc 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -16,7 +16,7 @@ qc = { package = "quickcheck", version = "0.9.0", optional = true } rand07 = { package = "rand", version = "0.7", default-features = false, optional = true } hex = { version = "0.4", default-features = false } static_assertions = "1.0.0" -arbitrary = { version = "0.4", optional = true } +arbitrary = { version = "1.0", optional = true } [features] default = ["std"] diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 702e59e86..1a2462c0d 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1780,7 +1780,7 @@ macro_rules! impl_quickcheck_arbitrary_for_uint { #[doc(hidden)] macro_rules! 
impl_arbitrary_for_uint { ($uint: ty, $n_bytes: tt) => { - impl $crate::arbitrary::Arbitrary for $uint { + impl $crate::arbitrary::Arbitrary<'_> for $uint { fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { let mut res = [0u8; $n_bytes]; u.fill_buffer(&mut res)?; From 7ff438a2db97b4f97933f7dd781cce146e5e0ba7 Mon Sep 17 00:00:00 2001 From: mdben <10809772+mdben1247@users.noreply.github.com> Date: Tue, 2 Mar 2021 12:43:49 +0100 Subject: [PATCH 210/359] [kvdb-rocksdb] add DatabaseConfig max_total_wal_size (#528) * Add DatabaseConfig max_total_wal_size * formatting * formatting * fixed test * Update kvdb-rocksdb/src/lib.rs Co-authored-by: Andronik Ordian * carog fmt Co-authored-by: mdben1247 Co-authored-by: Andronik Ordian --- kvdb-rocksdb/src/lib.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 321e1d118..a21f8aed1 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -177,6 +177,9 @@ pub struct DatabaseConfig { /// if the secondary instance reads and applies state changes before the primary instance compacts them. 
/// More info: https://github.com/facebook/rocksdb/wiki/Secondary-instance pub secondary: Option, + /// Limit the size (in bytes) of write ahead logs + /// More info: https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log + pub max_total_wal_size: Option, } impl DatabaseConfig { @@ -227,6 +230,7 @@ impl Default for DatabaseConfig { keep_log_file_num: 1, enable_statistics: false, secondary: None, + max_total_wal_size: None, } } } @@ -325,6 +329,9 @@ fn generate_options(config: &DatabaseConfig) -> Options { opts.set_bytes_per_sync(1 * MB as u64); opts.set_keep_log_file_num(1); opts.increase_parallelism(cmp::max(1, num_cpus::get() as i32 / 2)); + if let Some(m) = config.max_total_wal_size { + opts.set_max_total_wal_size(m); + } opts } @@ -898,6 +905,7 @@ mod tests { keep_log_file_num: 1, enable_statistics: false, secondary: None, + max_total_wal_size: None, }; let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); From f3b859a1ab36cfd5fc49e8e1147e2220c632e65e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Mar 2021 16:55:33 +0000 Subject: [PATCH 211/359] build(deps): update pbkdf2 requirement from 0.6.0 to 0.7.3 (#523) Updates the requirements on [pbkdf2](https://github.com/RustCrypto/password-hashes) to permit the latest version. 
- [Release notes](https://github.com/RustCrypto/password-hashes/releases) - [Commits](https://github.com/RustCrypto/password-hashes/compare/scrypt-v0.6.0...pbkdf2-v0.7.3) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-crypto/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 1f7163425..6de817cb1 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -21,7 +21,7 @@ digest = "0.9.0" ethereum-types = { version = "0.11.0", optional = true, path = "../ethereum-types" } hmac = "0.10.1" lazy_static = { version = "1.4.0", optional = true } -pbkdf2 = "0.6.0" +pbkdf2 = "0.7.3" ripemd160 = "0.9.1" rustc-hex = { version = "2.1.0", default-features = false, optional = true } scrypt = { version = "0.5.0" } From 6a4ce4b77233a1bbc7a18b621b7d69efc36ba3a6 Mon Sep 17 00:00:00 2001 From: Sunny Gonnabathula Date: Wed, 3 Mar 2021 10:11:54 -0800 Subject: [PATCH 212/359] impl num_traits::sign::Unsigned (#531) Implements the `Unsigned` marker trait for the uint types created in `primitive-types`. --- primitive-types/impls/num-traits/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/primitive-types/impls/num-traits/src/lib.rs b/primitive-types/impls/num-traits/src/lib.rs index 094447592..11380a8b1 100644 --- a/primitive-types/impls/num-traits/src/lib.rs +++ b/primitive-types/impls/num-traits/src/lib.rs @@ -20,6 +20,8 @@ pub use uint; #[macro_export] macro_rules! 
impl_uint_num_traits { ($name: ident, $len: expr) => { + impl $crate::num_traits::sign::Unsigned for $name {} + impl $crate::num_traits::identities::Zero for $name { #[inline] fn zero() -> Self { From 50fcd2317dc3b970cefaf523c5529bfffd9a5e2a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Mar 2021 14:01:18 +0100 Subject: [PATCH 213/359] build(deps): update num-bigint requirement from 0.3.1 to 0.4.0 (#532) Updates the requirements on [num-bigint](https://github.com/rust-num/num-bigint) to permit the latest version. - [Release notes](https://github.com/rust-num/num-bigint/releases) - [Changelog](https://github.com/rust-num/num-bigint/blob/master/RELEASES.md) - [Commits](https://github.com/rust-num/num-bigint/compare/num-bigint-0.3.1...num-bigint-0.4.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- uint/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index d5d009ffc..aff098b07 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -32,7 +32,7 @@ required-features = ["std"] [dev-dependencies] criterion = "0.3.0" -num-bigint = "0.3.1" +num-bigint = "0.4.0" [target.'cfg(all(unix, target_arch = "x86_64"))'.dev-dependencies] rug = { version = "1.6.0", default-features = false, features = ["integer"] } From 61fc474c4f14d2291820282aed081256c2391510 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Apr 2021 15:44:24 +0200 Subject: [PATCH 214/359] build(deps): update sysinfo requirement from 0.16.3 to 0.17.0 (#536) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. 
- [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 0481f42df..5b44388d9 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -31,6 +31,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.16.3" +sysinfo = "0.17.0" ctrlc = "3.1.4" chrono = "0.4" From bce6c333676c71ca72c7ceb7badeeef0ae863002 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Apr 2021 14:18:40 +0200 Subject: [PATCH 215/359] build(deps): update rocksdb requirement from 0.15 to 0.16 (#537) Updates the requirements on [rocksdb](https://github.com/rust-rocksdb/rust-rocksdb) to permit the latest version. 
- [Release notes](https://github.com/rust-rocksdb/rust-rocksdb/releases) - [Changelog](https://github.com/rust-rocksdb/rust-rocksdb/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-rocksdb/rust-rocksdb/compare/v0.15.0...v0.16.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 5b44388d9..d2d947393 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -19,7 +19,7 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.11.1" regex = "1.3.1" -rocksdb = { version = "0.15", features = ["snappy"], default-features = false } +rocksdb = { version = "0.16", features = ["snappy"], default-features = false } owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false, features = ["std", "smallvec"] } From 0867e488f6567e0d7fe13e78a14797ff019033f7 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 3 May 2021 20:35:32 +0200 Subject: [PATCH 216/359] bump kvdb-rocksdb and update changelog (#542) --- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index f55d08a57..72ca223dd 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.11.1] - 2021-05-03 +- Updated `rocksdb` to 0.16. [#537](https://github.com/paritytech/parity-common/pull/537) + ## [0.11.0] - 2021-01-27 ### Breaking - Updated `kvdb` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index d2d947393..3b6ff8d9a 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.11.0" +version = "0.11.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -13,7 +13,7 @@ harness = false [dependencies] smallvec = "1.0.0" -fs-swap = "0.2.5" +fs-swap = "0.2.6" kvdb = { path = "../kvdb", version = "0.9" } log = "0.4.8" num_cpus = "1.10.1" From 0ac86ef7159e9fc7695de280f7095c97e5e5ca81 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 24 May 2021 14:48:35 +0200 Subject: [PATCH 217/359] primitive-types: add U128 full_mul (#546) * primitive-types: add U128 full_mul * primitive-types: update the changelog * Apply suggestions from code review Co-authored-by: David Co-authored-by: David --- primitive-types/CHANGELOG.md | 3 +++ primitive-types/src/lib.rs | 13 +++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index df097bc61..3e3357d78 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Added `U128::full_mul` method. [#546](https://github.com/paritytech/parity-common/pull/546) +### Breaking +- Updated `scale-info` to 0.6. 
[#519](https://github.com/paritytech/parity-common/pull/519) ## [0.9.0] - 2021-01-27 ### Breaking diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index ab3248f60..133b05c7e 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -125,9 +125,18 @@ mod rlp { impl_fixed_hash_conversions!(H256, H160); +impl U128 { + /// Multiplies two 128-bit integers to produce full 256-bit integer. + /// Overflow is not possible. + #[inline(always)] + pub fn full_mul(self, other: U128) -> U256 { + U256(uint_full_mul_reg!(U128, 2, self, other)) + } +} + impl U256 { - /// Multiplies two 256-bit integers to produce full 512-bit integer - /// No overflow possible + /// Multiplies two 256-bit integers to produce full 512-bit integer. + /// Overflow is not possible. #[inline(always)] pub fn full_mul(self, other: U256) -> U512 { U512(uint_full_mul_reg!(U256, 4, self, other)) From 7c2a9b28e266d0b0adbad4869e2559500fad4859 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Fri, 11 Jun 2021 17:52:40 +0100 Subject: [PATCH 218/359] Simplification (#550) --- uint/src/uint.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 1a2462c0d..ff064ed3a 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -603,12 +603,11 @@ macro_rules! construct_uint { /// Convert from a decimal string. 
pub fn from_dec_str(value: &str) -> $crate::core_::result::Result { - if !value.bytes().all(|b| b >= 48 && b <= 57) { - return Err($crate::FromDecStrErr::InvalidCharacter) - } - let mut res = Self::default(); - for b in value.bytes().map(|b| b - 48) { + for b in value.bytes().map(|b| b.wrapping_sub(b'0')) { + if b > 9 { + return Err($crate::FromDecStrErr::InvalidCharacter) + } let (r, overflow) = res.overflowing_mul_u64(10); if overflow > 0 { return Err($crate::FromDecStrErr::InvalidLength); From 96909f371e26747f15df89b38fc2a64ec8a4f946 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 28 Jun 2021 20:30:13 +0200 Subject: [PATCH 219/359] parity-util-mem: fix for FreeBSD (#553) * parity-util-mem: fix for FreeBSD * update the changelog --- parity-util-mem/CHANGELOG.md | 1 + parity-util-mem/src/allocators.rs | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 0c88476ca..e4c532662 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Fixed `malloc_usable_size` for FreeBSD. [#553](https://github.com/paritytech/parity-common/pull/553) ## [0.9.0] - 2021-01-27 ### Breaking diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index fca674ebe..1eadf701b 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -97,9 +97,12 @@ mod usable_size { libmimalloc_sys::mi_usable_size(ptr as *mut _) } - } else if #[cfg(any(target_os = "linux", target_os = "android"))] { - - /// Linux call system allocator (currently malloc). + } else if #[cfg(any( + target_os = "linux", + target_os = "android", + target_os = "freebsd", + ))] { + /// Linux/BSD call system allocator (currently malloc). 
extern "C" { pub fn malloc_usable_size(ptr: *const c_void) -> usize; } From 828a5d6a51fc919c6d70c6da8022e4d739a22e48 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 30 Jun 2021 14:25:22 +0200 Subject: [PATCH 220/359] uint: implement integer_sqrt (#554) * uint: implement integer_sqrt * uint: remove wrong assertion * go brrrrrrrrrrrrrr * we don't need to move anymore * impl-num-traits: add integer-sqrt trait support * add missing test * fmt * bump uint --- .travis.yml | 1 + primitive-types/Cargo.toml | 4 ++ primitive-types/impls/num-traits/CHANGELOG.md | 1 + primitive-types/impls/num-traits/Cargo.toml | 3 +- primitive-types/impls/num-traits/src/lib.rs | 9 ++++ primitive-types/tests/num_traits.rs | 17 +++++++ uint/CHANGELOG.md | 1 + uint/Cargo.toml | 2 +- uint/benches/bigint.rs | 35 +++++++++++++ uint/fuzz/Cargo.toml | 4 ++ uint/fuzz/fuzz_targets/div_mod.rs | 4 +- uint/fuzz/fuzz_targets/div_mod_word.rs | 4 +- uint/fuzz/fuzz_targets/isqrt.rs | 50 +++++++++++++++++++ uint/src/uint.rs | 22 ++++++++ uint/tests/uint_tests.rs | 16 ++++++ 15 files changed, 167 insertions(+), 6 deletions(-) create mode 100644 primitive-types/tests/num_traits.rs create mode 100644 uint/fuzz/fuzz_targets/isqrt.rs diff --git a/.travis.yml b/.travis.yml index 86b226190..13e3a218e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -61,6 +61,7 @@ script: - cd primitive-types/ && cargo test --all-features && cd .. - cd primitive-types/ && cargo test --no-default-features --features=serde_no_std && cd .. - cd primitive-types/ && cargo test --no-default-features --features=scale-info && cd .. + - cd primitive-types/ && cargo test --no-default-features --features=num-traits && cd .. - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - cd triehash/ && cargo check --benches && cd .. - cd kvdb-web/ && wasm-pack test --headless --firefox && cd .. 
diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 426920aa1..a4d01151d 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -40,3 +40,7 @@ required-features = ["scale-info"] [[test]] name = "fp_conversion" required-features = ["fp-conversion"] + +[[test]] +name = "num_traits" +required-features = ["num-traits"] diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md index 545cf7dff..b7385ae89 100644 --- a/primitive-types/impls/num-traits/CHANGELOG.md +++ b/primitive-types/impls/num-traits/CHANGELOG.md @@ -5,3 +5,4 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Added `integer-sqrt` trait support. [#554](https://github.com/paritytech/parity-common/pull/554) diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index a77774608..006d131df 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-num-traits" -version = "0.1.0" +version = "0.1.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,4 +9,5 @@ edition = "2018" [dependencies] num-traits = { version = "0.2", default-features = false } +integer-sqrt = "0.1" uint = { version = "0.9.0", path = "../../../uint", default-features = false } diff --git a/primitive-types/impls/num-traits/src/lib.rs b/primitive-types/impls/num-traits/src/lib.rs index 11380a8b1..5fa9a76d4 100644 --- a/primitive-types/impls/num-traits/src/lib.rs +++ b/primitive-types/impls/num-traits/src/lib.rs @@ -13,6 +13,9 @@ #[doc(hidden)] pub use num_traits; +#[doc(hidden)] +pub use integer_sqrt; + #[doc(hidden)] pub use uint; @@ -48,5 +51,11 @@ macro_rules! 
impl_uint_num_traits { Self::from_str_radix(txt, radix) } } + + impl $crate::integer_sqrt::IntegerSquareRoot for $name { + fn integer_sqrt_checked(&self) -> Option { + Some(self.integer_sqrt()) + } + } }; } diff --git a/primitive-types/tests/num_traits.rs b/primitive-types/tests/num_traits.rs new file mode 100644 index 000000000..1d6c8e8a5 --- /dev/null +++ b/primitive-types/tests/num_traits.rs @@ -0,0 +1,17 @@ +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use impl_num_traits::integer_sqrt::IntegerSquareRoot; +use primitive_types::U256; + +#[test] +fn u256_isqrt() { + let x = U256::MAX; + let s = x.integer_sqrt_checked().unwrap(); + assert_eq!(x.integer_sqrt(), s); +} diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 3e3ddad3f..aa83d5e37 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Added `integer_sqrt` method. [#554](https://github.com/paritytech/parity-common/pull/554) ## [0.9.0] - 2021-01-05 - Allow `0x` prefix in `from_str`. 
[#487](https://github.com/paritytech/parity-common/pull/487) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index aff098b07..1db8710be 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.9.0" +version = "0.9.1" authors = ["Parity Technologies "] readme = "README.md" edition = "2018" diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index a79c01d8e..f41f34123 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -44,6 +44,7 @@ criterion_group!( u256_div, u512_div_mod, u256_rem, + u256_integer_sqrt, u256_bit_and, u256_bit_or, u256_bit_xor, @@ -58,6 +59,7 @@ criterion_group!( u512_mul, u512_div, u512_rem, + u512_integer_sqrt, u512_mul_u32_vs_u64, mulmod_u512_vs_biguint_vs_gmp, conversions, @@ -253,6 +255,22 @@ fn u256_rem(c: &mut Criterion) { ); } +fn u256_integer_sqrt(c: &mut Criterion) { + c.bench( + "u256_integer_sqrt", + ParameterizedBenchmark::new( + "", + |b, x| b.iter(|| black_box(x.integer_sqrt().0)), + vec![ + U256::from(u64::MAX), + U256::from(u128::MAX) + 1, + U256::from(u128::MAX - 1) * U256::from(u128::MAX - 1) - 1, + U256::MAX, + ], + ), + ); +} + fn u512_pairs() -> Vec<(U512, U512)> { vec![ (U512::from(1u64), U512::from(0u64)), @@ -286,6 +304,23 @@ fn u512_mul(c: &mut Criterion) { ); } +fn u512_integer_sqrt(c: &mut Criterion) { + c.bench( + "u512_integer_sqrt", + ParameterizedBenchmark::new( + "", + |b, x| b.iter(|| black_box(x.integer_sqrt().0)), + vec![ + U512::from(u32::MAX) + 1, + U512::from(u64::MAX), + (U512::from(u128::MAX) + 1) * (U512::from(u128::MAX) + 1), + U256::MAX.full_mul(U256::MAX) - 1, + U512::MAX, + ], + ), + ); +} + fn u512_div(c: &mut Criterion) { let one = U512([ 8326634216714383706, diff --git a/uint/fuzz/Cargo.toml b/uint/fuzz/Cargo.toml index 151c851cb..b549817a5 100644 --- a/uint/fuzz/Cargo.toml +++ b/uint/fuzz/Cargo.toml @@ -24,3 +24,7 @@ path 
= "fuzz_targets/div_mod.rs" [[bin]] name = "div_mod_word" path = "fuzz_targets/div_mod_word.rs" + +[[bin]] +name = "isqrt" +path = "fuzz_targets/isqrt.rs" diff --git a/uint/fuzz/fuzz_targets/div_mod.rs b/uint/fuzz/fuzz_targets/div_mod.rs index 102407ecc..fdeaefa86 100644 --- a/uint/fuzz/fuzz_targets/div_mod.rs +++ b/uint/fuzz/fuzz_targets/div_mod.rs @@ -23,7 +23,7 @@ fn from_gmp(x: Integer) -> U512 { } fuzz_target!(|data: &[u8]| { - if data.len() == 128 { + if data.len() == 128 { let x = U512::from_little_endian(&data[..64]); let y = U512::from_little_endian(&data[64..]); let x_gmp = Integer::from_digits(&data[..64], Order::LsfLe); @@ -32,5 +32,5 @@ fuzz_target!(|data: &[u8]| { let (a, b) = x_gmp.div_rem(y_gmp); assert_eq!((from_gmp(a), from_gmp(b)), x.div_mod(y)); } - } + } }); diff --git a/uint/fuzz/fuzz_targets/div_mod_word.rs b/uint/fuzz/fuzz_targets/div_mod_word.rs index 285304944..d1a04ee19 100644 --- a/uint/fuzz/fuzz_targets/div_mod_word.rs +++ b/uint/fuzz/fuzz_targets/div_mod_word.rs @@ -57,7 +57,7 @@ fn div_mod_word(hi: u64, lo: u64, y: u64) -> (u64, u64) { } fuzz_target!(|data: &[u8]| { - if data.len() == 24 { + if data.len() == 24 { let mut buf = [0u8; 8]; buf.copy_from_slice(&data[..8]); let x = u64::from_ne_bytes(buf); @@ -68,5 +68,5 @@ fuzz_target!(|data: &[u8]| { if x < z { assert_eq!(div_mod_word(x, y, z), div_mod_word_u128(x, y, z)); } - } + } }); diff --git a/uint/fuzz/fuzz_targets/isqrt.rs b/uint/fuzz/fuzz_targets/isqrt.rs new file mode 100644 index 000000000..63b28e8e2 --- /dev/null +++ b/uint/fuzz/fuzz_targets/isqrt.rs @@ -0,0 +1,50 @@ +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![no_main] + +use libfuzzer_sys::fuzz_target; +use uint::*; + +construct_uint! 
{ + pub struct U256(4); +} + +fn isqrt(mut me: U256) -> U256 { + let one = U256::one(); + if me <= one { + return me; + } + // the implementation is based on: + // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Binary_numeral_system_(base_2) + + // "bit" starts at the highest power of four <= self. + let max_shift = 4 * 64 as u32 - 1; + let shift: u32 = (max_shift - me.leading_zeros()) & !1; + let mut bit = one << shift; + let mut result = U256::zero(); + while !bit.is_zero() { + let x = result + bit; + result >>= 1; + if me >= x { + me -= x; + result += bit; + } + bit >>= 2; + } + result +} + +fuzz_target!(|data: &[u8]| { + if data.len() == 32 { + let x = U256::from_little_endian(data); + let expected = isqrt(x); + let got = x.integer_sqrt(); + assert_eq!(got, expected); + } +}); diff --git a/uint/src/uint.rs b/uint/src/uint.rs index ff064ed3a..a9d6df3ee 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -975,6 +975,28 @@ macro_rules! construct_uint { self.div_mod_knuth(other, n, m) } + /// Compute the highest `n` such that `n * n <= self`. + pub fn integer_sqrt(&self) -> Self { + let one = Self::one(); + if self <= &one { + return *self; + } + + // the implementation is based on: + // https://en.wikipedia.org/wiki/Integer_square_root#Using_only_integer_division + + // Set the initial guess to something higher than √self. + let shift: u32 = (self.bits() as u32 + 1) / 2; + let mut x_prev = one << shift; + loop { + let x = (x_prev + self / x_prev) >> 1; + if x >= x_prev { + return x_prev; + } + x_prev = x; + } + } + /// Fast exponentiation by squaring /// https://en.wikipedia.org/wiki/Exponentiation_by_squaring /// diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 318253279..57ad732cd 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1140,6 +1140,22 @@ pub mod laws { } } + quickcheck! 
{ + fn isqrt(x: $uint_ty) -> TestResult { + let s = x.integer_sqrt(); + let higher = s + 1; + if let Some(y) = higher.checked_mul(higher) { + TestResult::from_bool( + (s * s <= x) && (y > x) + ) + } else { + TestResult::from_bool( + s * s <= x + ) + } + } + } + quickcheck! { fn pow_mul(x: $uint_ty) -> TestResult { if x.overflowing_pow($uint_ty::from(2)).1 || x.overflowing_pow($uint_ty::from(3)).1 { From 52d50c748db0c3094f662006ef844227aac71ec5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Jun 2021 14:48:40 +0200 Subject: [PATCH 221/359] build(deps): update hashbrown requirement from 0.9 to 0.11 (#533) Updates the requirements on [hashbrown](https://github.com/rust-lang/hashbrown) to permit the latest version. - [Release notes](https://github.com/rust-lang/hashbrown/releases) - [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/hashbrown/compare/v0.9.0...v0.11.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index e88ba2eed..d07bff98f 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -19,7 +19,7 @@ cfg-if = "1.0.0" dlmalloc = { version = "0.2.1", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.6", optional = true } -hashbrown = { version = "0.9", optional = true } +hashbrown = { version = "0.11", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } From 5ac8cdbf96bea2b5c3615a09c0f54dd6cce39c80 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Thu, 1 Jul 2021 15:05:48 +0100 Subject: [PATCH 
222/359] primitive-types: update to `scale-info` 0.9 (#556) * Bump scale-info * Bump to 0.9.0 * Bump related crate versions * Revert rlp version bump, and related bumps * Update CHANGELOGs * review: revert unnecessary triehash bump Co-authored-by: Andronik Ordian * review: remove date Co-authored-by: Andronik Ordian * Remove dates from releases * More dates from releases removed * Update parity-util-mem/CHANGELOG.md Co-authored-by: Andronik Ordian * Expand scale-info requirement to include future 1.* releases * Fix scale_info tests Co-authored-by: Andronik Ordian --- contract-address/CHANGELOG.md | 4 ++++ contract-address/Cargo.toml | 6 +++--- ethereum-types/CHANGELOG.md | 3 +++ ethereum-types/Cargo.toml | 4 ++-- keccak-hash/CHANGELOG.md | 3 +++ keccak-hash/Cargo.toml | 4 ++-- kvdb-memorydb/CHANGELOG.md | 4 ++++ kvdb-memorydb/Cargo.toml | 8 ++++---- kvdb-rocksdb/CHANGELOG.md | 4 ++++ kvdb-rocksdb/Cargo.toml | 8 ++++---- kvdb-shared-tests/CHANGELOG.md | 3 +++ kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb-web/CHANGELOG.md | 5 +++++ kvdb-web/Cargo.toml | 10 +++++----- kvdb/CHANGELOG.md | 3 +++ kvdb/Cargo.toml | 4 ++-- parity-crypto/CHANGELOG.md | 3 +++ parity-crypto/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 5 +++++ parity-util-mem/Cargo.toml | 6 +++--- primitive-types/CHANGELOG.md | 6 +++++- primitive-types/Cargo.toml | 9 +++------ primitive-types/tests/scale_info.rs | 4 ++-- rlp/Cargo.toml | 2 +- transaction-pool/Cargo.toml | 2 +- triehash/Cargo.toml | 2 +- 26 files changed, 79 insertions(+), 41 deletions(-) diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md index 583ee8569..673568b20 100644 --- a/contract-address/CHANGELOG.md +++ b/contract-address/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `ethereum-types` to 0.12. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `keccak-hash` to 0.8. 
[#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.6.0] - 2021-01-27 ### Breaking - Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml index 993ffe26c..b534f058c 100644 --- a/contract-address/Cargo.toml +++ b/contract-address/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "contract-address" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,9 +11,9 @@ edition = "2018" readme = "README.md" [dependencies] -ethereum-types = { version = "0.11.0", path = "../ethereum-types" } +ethereum-types = { version = "0.12.0", path = "../ethereum-types" } rlp = { version = "0.5", path = "../rlp" } -keccak-hash = { version = "0.7", path = "../keccak-hash", default-features = false } +keccak-hash = { version = "0.8", path = "../keccak-hash", default-features = false } [features] default = [] diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index cfc8d223b..15d97c7bf 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.11.0] - 2021-01-27 ### Breaking - Updated `ethbloom` to 0.11. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index dfe86dc75..b0c124958 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.11.0" +version = "0.12.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,7 +11,7 @@ edition = "2018" ethbloom = { path = "../ethbloom", version = "0.11", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.9", features = ["byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.10", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index ce980bbdf..455ca717b 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.7.0] - 2021-01-27 ### Breaking - Updated `primitive-types` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 6f1f29ee1..d48ef763e 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.7.0" +version = "0.8.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ edition = "2018" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.9", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.10", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index a5d9fa532..e9f497217 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.9.0] - 2021-01-27 ### Breaking - Updated `parity-util-mem` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 9599421f5..c60c48432 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.9.0" +version = "0.10.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -8,12 +8,12 @@ license = "MIT OR Apache-2.0" edition = "2018" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false, features = ["std"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std"] } parking_lot = "0.11.1" -kvdb = { version = "0.9", path = "../kvdb" } +kvdb = { version = "0.10", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } [features] default = [] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 72ca223dd..495612f0b 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.11.1] - 2021-05-03 - Updated `rocksdb` to 0.16. 
[#537](https://github.com/paritytech/parity-common/pull/537) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 3b6ff8d9a..e14608731 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.11.1" +version = "0.12.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,20 +14,20 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.6" -kvdb = { path = "../kvdb", version = "0.9" } +kvdb = { path = "../kvdb", version = "0.10" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.11.1" regex = "1.3.1" rocksdb = { version = "0.16", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md index f307aa6c3..17f7b2501 100644 --- a/kvdb-shared-tests/CHANGELOG.md +++ b/kvdb-shared-tests/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.7.0] - 2021-01-27 ### Breaking - Updated `kvdb` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 4ea00030e..356b44766 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "kvdb-shared-tests" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] edition = "2018" description = "Shared tests for kvdb functionality, to be executed against actual implementations" license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.9" } +kvdb = { path = "../kvdb", version = "0.10" } diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md index 323f42614..1dc4d7d70 100644 --- a/kvdb-web/CHANGELOG.md +++ b/kvdb-web/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `kvdb-memorydb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.9.0] - 2021-01-27 ### Breaking - Updated `kvdb` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml index fe04d8c18..7942b6b15 100644 --- a/kvdb-web/Cargo.toml +++ b/kvdb-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-web" -version = "0.9.0" +version = "0.10.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value database for use in browsers" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] wasm-bindgen = "0.2.69" js-sys = "0.3.46" -kvdb = { version = "0.9", path = "../kvdb" } -kvdb-memorydb = { version = "0.9", path = "../kvdb-memorydb" } +kvdb = { version = "0.10", path = "../kvdb" } +kvdb-memorydb = { version = "0.10", path = "../kvdb-memorydb" } futures = "0.3.8" log = "0.4.11" send_wrapper = "0.5.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false } # TODO: https://github.com/paritytech/parity-common/issues/479 # This is hack to enable `wasm-bindgen` feature of `parking_lot` in other dependencies. # Thus, it's not direct dependency and do not remove until a proper fix exists. @@ -44,6 +44,6 @@ features = [ [dev-dependencies] console_log = "0.2.0" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.7" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } wasm-bindgen-test = "0.3.19" wasm-bindgen-futures = "0.4.19" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 9c2c444d0..15b66dee7 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.9.0] - 2021-01-27 ### Breaking - Updated `parity-util-mem` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 8f22d0780..66d8a18b6 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.9.0" +version = "0.10.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -9,4 +9,4 @@ edition = "2018" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.9", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false } diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md index dfa71058d..af033f2a0 100644 --- a/parity-crypto/CHANGELOG.md +++ b/parity-crypto/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +### Breaking +- Updated `ethereum-types` to 0.12. [#556](https://github.com/paritytech/parity-common/pull/556) + ## [0.8.0] - 2021-01-27 ### Breaking - Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml index 6de817cb1..851ad0248 100644 --- a/parity-crypto/Cargo.toml +++ b/parity-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-crypto" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Crypto utils used by ethstore and network." 
@@ -18,7 +18,7 @@ aes = "0.6.0" aes-ctr = "0.6.0" block-modes = "0.7.0" digest = "0.9.0" -ethereum-types = { version = "0.11.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.12.0", optional = true, path = "../ethereum-types" } hmac = "0.10.1" lazy_static = { version = "1.4.0", optional = true } pbkdf2 = "0.7.3" diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index e4c532662..c5e13a77c 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -7,6 +7,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Fixed `malloc_usable_size` for FreeBSD. [#553](https://github.com/paritytech/parity-common/pull/553) +### Breaking +- Updated `ethereum-types` to 0.12. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Updated `hashbrown` to 0.11. [#533](https://github.com/paritytech/parity-common/pull/533) + ## [0.9.0] - 2021-01-27 ### Breaking - Updated `ethereum-types` to 0.11. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index d07bff98f..ec90c9565 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.9.0" +version = "0.10.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -26,9 +26,9 @@ parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.2.0" smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.11.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.12.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.11.1", optional = true } -primitive-types = { version = "0.9", path = "../primitive-types", default-features = false, optional = true } +primitive-types = { version = "0.10", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 3e3357d78..520cc9d43 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,9 +5,13 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +### Added - Added `U128::full_mul` method. [#546](https://github.com/paritytech/parity-common/pull/546) ### Breaking -- Updated `scale-info` to 0.6. [#519](https://github.com/paritytech/parity-common/pull/519) +- Updated `scale-info` to 0.9. [#556](https://github.com/paritytech/parity-common/pull/556) +### Removed +- Removed `parity-scale-codec` direct dependency. 
[#556](https://github.com/paritytech/parity-common/pull/556) ## [0.9.0] - 2021-01-27 ### Breaking diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index a4d01151d..030db9470 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.9.0" +version = "0.10.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -14,10 +14,7 @@ impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false impl-codec = { version = "0.5.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } -scale-info-crate = { package = "scale-info", version = "0.6", features = ["derive"], default-features = false, optional = true } -# we add parity-scale-codec here directly only because scale-info requires us to do so -# see https://github.com/paritytech/scale-info/pull/61#discussion_r568760753 -parity-scale-codec = { version = "2.0", default-features = false, optional = true } +scale-info-crate = { package = "scale-info", version = ">=0.9, <2", features = ["derive"], default-features = false, optional = true } [features] default = ["std"] @@ -27,7 +24,7 @@ rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde", "impl-serde/std"] serde_no_std = ["impl-serde"] codec = ["impl-codec"] -scale-info = ["codec", "scale-info-crate", "parity-scale-codec"] +scale-info = ["codec", "scale-info-crate"] rlp = ["impl-rlp"] arbitrary = ["fixed-hash/arbitrary", "uint/arbitrary"] fp-conversion = ["std"] diff --git a/primitive-types/tests/scale_info.rs b/primitive-types/tests/scale_info.rs index 3774c8448..a6abd7548 100644 --- a/primitive-types/tests/scale_info.rs +++ b/primitive-types/tests/scale_info.rs 
@@ -15,7 +15,7 @@ use scale_info_crate::{build::Fields, Path, Type, TypeInfo}; fn u256_scale_info() { let r#type = Type::builder() .path(Path::new("U256", "primitive_types")) - .composite(Fields::unnamed().field_of::<[u64; 4]>("[u64; 4]")); + .composite(Fields::unnamed().field(|f| f.ty::<[u64; 4]>().type_name("[u64; 4]"))); assert_eq!(U256::type_info(), r#type.into()); } @@ -24,7 +24,7 @@ fn u256_scale_info() { fn h256_scale_info() { let r#type = Type::builder() .path(Path::new("H256", "primitive_types")) - .composite(Fields::unnamed().field_of::<[u8; 32]>("[u8; 32]")); + .composite(Fields::unnamed().field(|f| f.ty::<[u8; 32]>().type_name("[u8; 32]"))); assert_eq!(H256::type_info(), r#type.into()); } diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index fa1070ec1..3898e2b3c 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -14,7 +14,7 @@ rustc-hex = { version = "2.0.1", default-features = false } [dev-dependencies] criterion = "0.3.0" hex-literal = "0.3.1" -primitive-types = { path = "../primitive-types", version = "0.9", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.10", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index bb3cb6ad3..cdf002afc 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -12,4 +12,4 @@ log = "0.4.8" smallvec = "1.6.0" [dev-dependencies] -ethereum-types = { version = "0.11.0", path = "../ethereum-types" } +ethereum-types = { version = "0.12.0", path = "../ethereum-types" } diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml index 0825f7b7e..440c07ab8 100644 --- a/triehash/Cargo.toml +++ b/triehash/Cargo.toml @@ -14,7 +14,7 @@ rlp = { version = "0.5", path = "../rlp", default-features = false } [dev-dependencies] criterion = "0.3.0" keccak-hasher = "0.15.2" -ethereum-types = { version = "0.11.0", path = "../ethereum-types" } +ethereum-types = { version = "0.12.0", path = "../ethereum-types" } 
tiny-keccak = { version = "2.0", features = ["keccak"] } trie-standardmap = "0.15.2" hex-literal = "0.3.1" From 075a34ee77b02f917ef350d3044acd95102ecc61 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 1 Jul 2021 16:24:59 +0200 Subject: [PATCH 223/359] update the changelogs (#557) --- primitive-types/impls/num-traits/CHANGELOG.md | 2 ++ primitive-types/impls/num-traits/Cargo.toml | 2 +- uint/CHANGELOG.md | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md index b7385ae89..e0b657b7d 100644 --- a/primitive-types/impls/num-traits/CHANGELOG.md +++ b/primitive-types/impls/num-traits/CHANGELOG.md @@ -5,4 +5,6 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.1.1] - 2021-06-30 - Added `integer-sqrt` trait support. [#554](https://github.com/paritytech/parity-common/pull/554) diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index 006d131df..27a8fc82a 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -10,4 +10,4 @@ edition = "2018" [dependencies] num-traits = { version = "0.2", default-features = false } integer-sqrt = "0.1" -uint = { version = "0.9.0", path = "../../../uint", default-features = false } +uint = { version = "0.9.1", path = "../../../uint", default-features = false } diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index aa83d5e37..73456b577 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.9.1] - 2021-06-30 - Added `integer_sqrt` method. 
[#554](https://github.com/paritytech/parity-common/pull/554) ## [0.9.0] - 2021-01-05 From 9e0e53a473aba2226a5c115f802762378fdf9fe7 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Thu, 1 Jul 2021 17:24:21 +0200 Subject: [PATCH 224/359] github actions ci (#559) * fix no-default-features tests * github actions * remove appveyor and travis configs * fmt * try to fix windows * try single quotes? * try resolver=2 ??? * update the badge * hmmmm * use use-cross * Revert "hmmmm" This reverts commit e909926cd50f846725231bdb162bc3cef5baa047. * final boss * Revert "final boss" This reverts commit c1ac1d3131266e62c4512d41e2eab17018725f3c. * listen * if you had * one shot * one opportunity * 5G * run CI on push to master as well * review feedback * cache cargo check Co-authored-by: Denis Pisarev Co-authored-by: Denis Pisarev --- .github/workflows/ci.yml | 160 ++++++++++++++++++++++++++++++++ .travis.yml | 69 -------------- Cargo.toml | 1 + README.md | 8 +- appveyor.yml | 32 ------- parity-util-mem/tests/derive.rs | 1 + triehash/src/lib.rs | 4 +- 7 files changed, 166 insertions(+), 109 deletions(-) create mode 100644 .github/workflows/ci.yml delete mode 100644 .travis.yml delete mode 100644 appveyor.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..2bebe3d07 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,160 @@ +on: + pull_request: + push: + branches: + - master + +name: Continuous integration + +jobs: + check: + name: Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@v1.3.0 + + - uses: actions-rs/cargo@v1 + with: + command: check + args: --workspace --all-targets + + test: + name: Test + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - ubuntu-latest + - windows-latest + - macOS-latest + steps: + - uses: actions/checkout@v2 + - uses: 
actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@v1.3.0 + + - name: Remove msys64 # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll + if: runner.os == 'Windows' + run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse + + - name: Install dependencies + if: runner.os == 'Windows' + run: choco install llvm -y + + - run: rustup target add wasm32-unknown-unknown + + - name: Test no-default-features + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace --no-default-features + + - name: Test default features + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace + + - name: Test uint + uses: actions-rs/cargo@v1 + with: + command: test + args: -p uint --all-features + + - name: Test fixed-hash no_std + run: cargo test -p fixed-hash --no-default-features --features='byteorder,rustc-hex' + + - name: Test fixed-hash all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p fixed-hash --all-features + + - name: Test primitive-types no_std + run: cargo test -p primitive-types --no-default-features --features='scale-info,num-traits,serde_no_std' + + - name: Test primitive-types all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p primitive-types --all-features + + - name: Build ethereum-types no_std + run: cargo build -p ethereum-types --no-default-features --features='serialize,rlp' --target=wasm32-unknown-unknown + + - name: Test ethereum-types all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p ethereum-types --all-features + + - name: Test ethbloom all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p ethbloom --all-features + + - name: Test parity-crypto all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p parity-crypto --all-features + + - name: Test uint on bigendian + if: runner.os == 'Linux' + uses: 
actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: -p uint --target=mips64-unknown-linux-gnuabi64 + + - name: Test parity-util-mem on Android + if: runner.os == 'Linux' + uses: actions-rs/cargo@v1 + with: + use-cross: true + command: test + args: -p parity-util-mem --target=aarch64-linux-android + + - name: Test parity-util-mem estimate-heapsize + run: cargo test -p parity-util-mem --features='estimate-heapsize' + + - name: Test parity-util-mem jemalloc-global + run: cargo test -p parity-util-mem --features='jemalloc-global' + + - name: Test parity-util-mem mimalloc-global + if: runner.os != 'Windows' + run: cargo test -p parity-util-mem --features='mimalloc-global' + + - name: Test parity-util-mem dlmalloc-global + if: runner.os != 'Windows' + run: cargo test -p parity-util-mem --no-default-features --features='dlmalloc-global' + + fmt: + name: Rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + - run: rustup component add rustfmt + - uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 13e3a218e..000000000 --- a/.travis.yml +++ /dev/null @@ -1,69 +0,0 @@ -language: rust -branches: - only: - - master -matrix: - include: - - os: linux - rust: stable - before_script: - - rustup component add rustfmt - - os: linux - rust: nightly - script: - - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - - cargo check --workspace --benches - - os: linux - rust: stable - install: - - cargo install cross - script: - - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - - cross test --target=aarch64-linux-android -p parity-util-mem - - cross test --target=mips64-unknown-linux-gnuabi64 -p uint - - os: osx - osx_image: xcode11.3 - addons: - firefox: latest - rust: stable - 
allow_failures: - - rust: nightly -install: - - rustup target add wasm32-unknown-unknown - - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | sh - - source ~/.nvm/nvm.sh - - nvm install --lts - - npm install -g geckodriver@1.19.1 # https://github.com/rustwasm/wasm-bindgen/issues/2261 - - curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - - which geckodriver -script: - - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - - if [ "$TRAVIS_OS_NAME" == "linux" ]; then - cargo fmt -- --check; - fi - - cargo check --workspace --tests --benches - - cargo test --workspace --exclude uint --exclude fixed-hash --exclude parity-crypto - - if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then - cd contract-address/ && cargo test --features=external_doc && cd ..; - fi - - cd ethbloom/ && cargo test --no-default-features --features="rustc-hex" && cargo check --benches && cd .. - - cd ethereum-types/ && cargo build --no-default-features --features="serialize,rlp" --target=wasm32-unknown-unknown && cd .. - - cd fixed-hash/ && cargo test --all-features && cargo test --no-default-features --features="byteorder,rustc-hex" && cd .. - - cd uint/ && cargo test --all-features && cargo test --no-default-features && cd .. - - cd keccak-hash/ && cargo test --no-default-features && cd .. - - cd plain_hasher/ && cargo test --no-default-features && cargo check --benches && cd .. - - cd parity-bytes/ && cargo test --no-default-features && cd .. - - cd parity-crypto/ && cargo test --all-features && cd .. - - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. - - cd parity-util-mem/ && cargo test --features=jemalloc-global && cd .. - - cd parity-util-mem/ && cargo test --features=mimalloc-global && cd .. - - cd parity-util-mem/ && cargo test --no-default-features --features=dlmalloc-global && cd .. - - cd primitive-types/ && cargo test --all-features && cd .. 
- - cd primitive-types/ && cargo test --no-default-features --features=serde_no_std && cd .. - - cd primitive-types/ && cargo test --no-default-features --features=scale-info && cd .. - - cd primitive-types/ && cargo test --no-default-features --features=num-traits && cd .. - - cd rlp/ && cargo test --no-default-features && cargo check --benches && cd .. - - cd triehash/ && cargo check --benches && cd .. - - cd kvdb-web/ && wasm-pack test --headless --firefox && cd .. - - cd ethbloom/ && cargo test --all-features && cd .. - - cd ethereum-types/ && cargo test --all-features && cd .. diff --git a/Cargo.toml b/Cargo.toml index c3380e0da..3092027a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = [ "contract-address", "fixed-hash", diff --git a/README.md b/README.md index b4647e7e9..2a97d14e0 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,4 @@ -[![Build Status travis][travis-image]][travis-url] -[![Build Status appveyor][appveyor-image]][appveyor-url] - -[travis-image]: https://travis-ci.org/paritytech/parity-common.svg?branch=master -[travis-url]: https://travis-ci.org/paritytech/parity-common -[appveyor-image]: https://ci.appveyor.com/api/projects/status/github/paritytech/parity-common?branch=master&svg=true -[appveyor-url]: https://ci.appveyor.com/project/paritytech/parity-common/branch/master +[![Continuous integration](https://github.com/paritytech/parity-common/actions/workflows/ci.yml/badge.svg)](https://github.com/paritytech/parity-common/actions/workflows/ci.yml) # parity-common Collection of crates used in [Parity Technologies](https://www.paritytech.io/) projects diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 6a0725a92..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -environment: - matrix: - - FEATURES: "" - -platform: - - x86_64-pc-windows-msvc - -# avoid running tests twice -branches: - only: - - master - -install: - - curl -sSf -o rustup-init.exe https://win.rustup.rs/ 
- - rustup-init.exe -y --default-host %PLATFORM% - - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin - - rustc -vV - - cargo -vV - -build_script: - - cargo generate-lockfile --verbose && cargo update -p funty --precise "1.1.0" --verbose - - cargo check --tests - - cargo build --all - -test_script: - - cargo test --all --exclude uint --exclude fixed-hash - - cd fixed-hash/ && cargo test --all-features && cd .. - - cd uint/ && cargo test --features=std,quickcheck --release && cd .. - - cd plain_hasher/ && cargo test --no-default-features && cd .. - - cd parity-util-mem/ && cargo test --no-default-features && cd .. - - cd parity-util-mem/ && cargo test --features=estimate-heapsize && cd .. - - cd parity-util-mem/ && cargo test && cd .. diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs index 4fb5f7328..63825ba61 100644 --- a/parity-util-mem/tests/derive.rs +++ b/parity-util-mem/tests/derive.rs @@ -53,6 +53,7 @@ fn derive_ignore() { } #[test] +#[cfg(all(feature = "lru", feature = "hashbrown"))] fn derive_morecomplex() { #[derive(MallocSizeOf)] struct Trivia { diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs index a60a24998..63f93a5f6 100644 --- a/triehash/src/lib.rs +++ b/triehash/src/lib.rs @@ -260,7 +260,9 @@ where #[cfg(test)] mod tests { - use super::{hex_prefix_encode, shared_prefix_len, trie_root}; + use super::*; + #[cfg(not(feature = "std"))] + use alloc::vec; use ethereum_types::H256; use hex_literal::hex; use keccak_hasher::KeccakHasher; From 84a7d26c1be94748202a7536c8426ac1c9e23523 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 2 Jul 2021 14:20:21 +0200 Subject: [PATCH 225/359] start removing stuff (#561) * remove deprecated crates * improve windows CI * ok, not all features --- .github/workflows/ci.yml | 31 +- Cargo.toml | 7 - contract-address/CHANGELOG.md | 24 - contract-address/Cargo.toml | 26 - contract-address/README.md | 17 - contract-address/src/lib.rs | 106 ---- kvdb-web/CHANGELOG.md | 52 -- 
kvdb-web/Cargo.toml | 49 -- kvdb-web/src/error.rs | 46 -- kvdb-web/src/indexed_db.rs | 238 -------- kvdb-web/src/lib.rs | 128 ----- kvdb-web/tests/indexed_db.rs | 94 ---- parity-path/CHANGELOG.md | 10 - parity-path/Cargo.toml | 11 - parity-path/src/lib.rs | 97 ---- plain_hasher/CHANGELOG.md | 19 - plain_hasher/Cargo.toml | 22 - plain_hasher/README.md | 5 - plain_hasher/benches/bench.rs | 37 -- plain_hasher/src/lib.rs | 55 -- trace-time/CHANGELOG.md | 15 - trace-time/Cargo.toml | 11 - trace-time/src/lib.rs | 41 -- transaction-pool/CHANGELOG.md | 15 - transaction-pool/Cargo.toml | 15 - transaction-pool/src/error.rs | 56 -- transaction-pool/src/lib.rs | 107 ---- transaction-pool/src/listener.rs | 81 --- transaction-pool/src/options.rs | 24 - transaction-pool/src/pool.rs | 624 --------------------- transaction-pool/src/ready.rs | 50 -- transaction-pool/src/replace.rs | 41 -- transaction-pool/src/scoring.rs | 163 ------ transaction-pool/src/status.rs | 32 -- transaction-pool/src/tests/helpers.rs | 111 ---- transaction-pool/src/tests/mod.rs | 670 ----------------------- transaction-pool/src/tests/tx_builder.rs | 59 -- transaction-pool/src/transactions.rs | 204 ------- transaction-pool/src/verifier.rs | 23 - triehash/CHANGELOG.md | 18 - triehash/Cargo.toml | 32 -- triehash/README.md | 2 - triehash/benches/triehash.rs | 124 ----- triehash/src/lib.rs | 350 ------------ 44 files changed, 20 insertions(+), 3922 deletions(-) delete mode 100644 contract-address/CHANGELOG.md delete mode 100644 contract-address/Cargo.toml delete mode 100644 contract-address/README.md delete mode 100644 contract-address/src/lib.rs delete mode 100644 kvdb-web/CHANGELOG.md delete mode 100644 kvdb-web/Cargo.toml delete mode 100644 kvdb-web/src/error.rs delete mode 100644 kvdb-web/src/indexed_db.rs delete mode 100644 kvdb-web/src/lib.rs delete mode 100644 kvdb-web/tests/indexed_db.rs delete mode 100644 parity-path/CHANGELOG.md delete mode 100644 parity-path/Cargo.toml delete mode 100644 
parity-path/src/lib.rs delete mode 100644 plain_hasher/CHANGELOG.md delete mode 100644 plain_hasher/Cargo.toml delete mode 100644 plain_hasher/README.md delete mode 100644 plain_hasher/benches/bench.rs delete mode 100644 plain_hasher/src/lib.rs delete mode 100644 trace-time/CHANGELOG.md delete mode 100644 trace-time/Cargo.toml delete mode 100644 trace-time/src/lib.rs delete mode 100644 transaction-pool/CHANGELOG.md delete mode 100644 transaction-pool/Cargo.toml delete mode 100644 transaction-pool/src/error.rs delete mode 100644 transaction-pool/src/lib.rs delete mode 100644 transaction-pool/src/listener.rs delete mode 100644 transaction-pool/src/options.rs delete mode 100644 transaction-pool/src/pool.rs delete mode 100644 transaction-pool/src/ready.rs delete mode 100644 transaction-pool/src/replace.rs delete mode 100644 transaction-pool/src/scoring.rs delete mode 100644 transaction-pool/src/status.rs delete mode 100644 transaction-pool/src/tests/helpers.rs delete mode 100644 transaction-pool/src/tests/mod.rs delete mode 100644 transaction-pool/src/tests/tx_builder.rs delete mode 100644 transaction-pool/src/transactions.rs delete mode 100644 transaction-pool/src/verifier.rs delete mode 100644 triehash/CHANGELOG.md delete mode 100644 triehash/Cargo.toml delete mode 100644 triehash/README.md delete mode 100644 triehash/benches/triehash.rs delete mode 100644 triehash/src/lib.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2bebe3d07..115bec02f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,7 +33,6 @@ jobs: matrix: os: - ubuntu-latest - - windows-latest - macOS-latest steps: - uses: actions/checkout@v2 @@ -46,14 +45,6 @@ jobs: - name: Rust Cache uses: Swatinem/rust-cache@v1.3.0 - - name: Remove msys64 # Workaround to resolve link error with C:\msys64\mingw64\bin\libclang.dll - if: runner.os == 'Windows' - run: Remove-Item -LiteralPath "C:\msys64\" -Force -Recurse - - - name: Install dependencies - if: runner.os == 
'Windows' - run: choco install llvm -y - - run: rustup target add wasm32-unknown-unknown - name: Test no-default-features @@ -136,13 +127,31 @@ jobs: run: cargo test -p parity-util-mem --features='jemalloc-global' - name: Test parity-util-mem mimalloc-global - if: runner.os != 'Windows' run: cargo test -p parity-util-mem --features='mimalloc-global' - name: Test parity-util-mem dlmalloc-global - if: runner.os != 'Windows' run: cargo test -p parity-util-mem --no-default-features --features='dlmalloc-global' + test_windows: + name: Test Windows + runs-on: windows-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Rust Cache + uses: Swatinem/rust-cache@v1.3.0 + + - uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace --exclude kvdb-rocksdb + + fmt: name: Rustfmt runs-on: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index 3092027a5..91bb0fee1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,23 +1,16 @@ [workspace] resolver = "2" members = [ - "contract-address", "fixed-hash", "keccak-hash", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", "kvdb-shared-tests", - "kvdb-web", "parity-bytes", "parity-crypto", - "parity-path", - "plain_hasher", "rlp", "rlp-derive", - "transaction-pool", - "trace-time", - "triehash", "uint", "parity-util-mem", "primitive-types", diff --git a/contract-address/CHANGELOG.md b/contract-address/CHANGELOG.md deleted file mode 100644 index 673568b20..000000000 --- a/contract-address/CHANGELOG.md +++ /dev/null @@ -1,24 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -### Breaking -- Updated `ethereum-types` to 0.12. [#556](https://github.com/paritytech/parity-common/pull/556) -- Updated `keccak-hash` to 0.8. 
[#556](https://github.com/paritytech/parity-common/pull/556) - -## [0.6.0] - 2021-01-27 -### Breaking -- Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) -- Updated `keccak-hash` to 0.7. [#510](https://github.com/paritytech/parity-common/pull/510) - -## [0.5.0] - 2021-01-05 -### Breaking -- Updated `ethereum-types` to 0.10. [#463](https://github.com/paritytech/parity-common/pull/463) - -## [0.4.0] - 2020-03-16 -- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) -- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) diff --git a/contract-address/Cargo.toml b/contract-address/Cargo.toml deleted file mode 100644 index b534f058c..000000000 --- a/contract-address/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "contract-address" -version = "0.7.0" -authors = ["Parity Technologies "] -license = "MIT OR Apache-2.0" -homepage = "https://github.com/paritytech/parity-common" -repository = "https://github.com/paritytech/parity-common" -description = "A utility crate to create an ethereum contract address" -documentation = "https://docs.rs/contract-address/" -edition = "2018" -readme = "README.md" - -[dependencies] -ethereum-types = { version = "0.12.0", path = "../ethereum-types" } -rlp = { version = "0.5", path = "../rlp" } -keccak-hash = { version = "0.8", path = "../keccak-hash", default-features = false } - -[features] -default = [] -# this uses a nightly-only feature -# to embed REAMDE.md into lib.rs module docs -external_doc = [] - -[package.metadata.docs.rs] -# docs.rs builds the docs with nightly rust -features = ["external_doc"] diff --git a/contract-address/README.md b/contract-address/README.md deleted file mode 100644 index 8bf029d0b..000000000 --- a/contract-address/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contract address - -Provides a function to create an ethereum contract address. 
- -## Examples - -Create an ethereum address from sender and nonce. - -```rust -use contract_address::{ - Address, U256, ContractAddress -}; -use std::str::FromStr; - -let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); -let contract_address = ContractAddress::from_sender_and_nonce(&sender, &U256::zero()); -``` diff --git a/contract-address/src/lib.rs b/contract-address/src/lib.rs deleted file mode 100644 index 787f8b06f..000000000 --- a/contract-address/src/lib.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![cfg_attr(feature = "external_doc", feature(external_doc))] -#![cfg_attr(feature = "external_doc", doc(include = "../README.md"))] - -pub use ethereum_types::{Address, H256, U256}; -use keccak_hash::keccak; -use rlp::RlpStream; -use std::ops::Deref; - -/// Represents an ethereum contract address -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub struct ContractAddress(Address); - -impl ContractAddress { - /// Computes the address of a contract from the sender's address and the transaction nonce - pub fn from_sender_and_nonce(sender: &Address, nonce: &U256) -> Self { - let mut stream = RlpStream::new_list(2); - stream.append(sender); - stream.append(nonce); - - ContractAddress(Address::from(keccak(stream.as_raw()))) - } - - /// Computes the address of a contract from the sender's address, the salt and code hash - /// - /// pWASM `create2` scheme and EIP-1014 CREATE2 scheme - pub fn from_sender_salt_and_code(sender: &Address, salt: H256, code_hash: H256) -> Self { - let mut buffer = [0u8; 1 + 20 + 32 + 32]; - buffer[0] = 0xff; - &mut buffer[1..(1 + 20)].copy_from_slice(&sender[..]); - &mut buffer[(1 + 20)..(1 + 20 + 32)].copy_from_slice(&salt[..]); - &mut buffer[(1 + 20 + 
32)..].copy_from_slice(&code_hash[..]); - - ContractAddress(Address::from(keccak(&buffer[..]))) - } - - /// Computes the address of a contract from the sender's address and the code hash - /// - /// Used by pwasm create ext. - pub fn from_sender_and_code(sender: &Address, code_hash: H256) -> Self { - let mut buffer = [0u8; 20 + 32]; - &mut buffer[..20].copy_from_slice(&sender[..]); - &mut buffer[20..].copy_from_slice(&code_hash[..]); - - ContractAddress(Address::from(keccak(&buffer[..]))) - } -} - -impl Deref for ContractAddress { - type Target = Address; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl From for Address { - fn from(contract_address: ContractAddress) -> Self { - contract_address.0 - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::str::FromStr; - - #[test] - fn test_from_sender_and_nonce() { - let sender = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); - let expected = Address::from_str("3f09c73a5ed19289fb9bdc72f1742566df146f56").unwrap(); - - let actual = ContractAddress::from_sender_and_nonce(&sender, &U256::from(88)); - - assert_eq!(Address::from(actual), expected); - } - - #[test] - fn test_from_sender_salt_and_code_hash() { - let sender = Address::zero(); - let code_hash = H256::from_str("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").unwrap(); - let expected_address = Address::from_str("e33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0").unwrap(); - - let contract_address = ContractAddress::from_sender_salt_and_code(&sender, H256::zero(), code_hash); - - assert_eq!(Address::from(contract_address), expected_address); - } - - #[test] - fn test_from_sender_and_code_hash() { - let sender = Address::from_str("0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d").unwrap(); - let code_hash = H256::from_str("d98f2e8134922f73748703c8e7084d42f13d2fa1439936ef5a3abcf5646fe83f").unwrap(); - let expected_address = Address::from_str("064417880f5680b141ed7fcac031aad40df080b0").unwrap(); - - let 
contract_address = ContractAddress::from_sender_and_code(&sender, code_hash); - - assert_eq!(Address::from(contract_address), expected_address); - } -} diff --git a/kvdb-web/CHANGELOG.md b/kvdb-web/CHANGELOG.md deleted file mode 100644 index 1dc4d7d70..000000000 --- a/kvdb-web/CHANGELOG.md +++ /dev/null @@ -1,52 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -### Breaking -- Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) -- Updated `kvdb-memorydb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) -- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) - -## [0.9.0] - 2021-01-27 -### Breaking -- Updated `kvdb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) -- Updated `kvdb-memorydb` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) -- Updated `parity-util-mem` to 0.9. [#510](https://github.com/paritytech/parity-common/pull/510) - -## [0.8.0] - 2021-01-05 -### Breaking -- Updated dependencies. [#470](https://github.com/paritytech/parity-common/pull/470) - -## [0.7.0] - 2020-07-06 -- Updated `kvdb` to 0.7.0 [#404](https://github.com/paritytech/parity-common/pull/404) - -## [0.6.0] - 2020-05-05 -### Breaking -- Updated to the new `kvdb` interface. [#313](https://github.com/paritytech/parity-common/pull/313) - -## [0.5.0] - 2020-03-16 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) -- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) - -## [0.4.0] - 2019-02-05 -- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) - -## [0.3.1] - 2019-01-06 -- Updated features and feature dependencies. 
[#307](https://github.com/paritytech/parity-common/pull/307) - -## [0.3.0] - 2019-01-04 -- Updated to new `kvdb` and `parity-util-mem` versions. [#299](https://github.com/paritytech/parity-common/pull/299) - -## [0.2.0] - 2019-12-19 -### Changed -- Default column support removed from the API - - Column argument type changed from `Option` to `u32` - - Migration `None` -> unsupported, `Some(0)` -> `0`, `Some(1)` -> `1`, etc. - -## [0.1.1] - 2019-10-24 -### Dependencies -- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/kvdb-web/Cargo.toml b/kvdb-web/Cargo.toml deleted file mode 100644 index 7942b6b15..000000000 --- a/kvdb-web/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -name = "kvdb-web" -version = "0.10.0" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "A key-value database for use in browsers" -documentation = "https://docs.rs/kvdb-web/" -license = "MIT OR Apache-2.0" -edition = "2018" - -[dependencies] -wasm-bindgen = "0.2.69" -js-sys = "0.3.46" -kvdb = { version = "0.10", path = "../kvdb" } -kvdb-memorydb = { version = "0.10", path = "../kvdb-memorydb" } -futures = "0.3.8" -log = "0.4.11" -send_wrapper = "0.5.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false } -# TODO: https://github.com/paritytech/parity-common/issues/479 -# This is hack to enable `wasm-bindgen` feature of `parking_lot` in other dependencies. -# Thus, it's not direct dependency and do not remove until a proper fix exists. 
-parking_lot = { version = "0.11.1", features = ["wasm-bindgen"] } - -[dependencies.web-sys] -version = "0.3.46" -features = [ - 'console', - 'Window', - 'IdbFactory', - 'IdbDatabase', - 'IdbTransaction', - 'IdbTransactionMode', - 'IdbOpenDbRequest', - 'IdbRequest', - 'IdbObjectStore', - 'Event', - 'EventTarget', - 'IdbCursor', - 'IdbCursorWithValue', - 'IdbKeyRange', - 'DomStringList', -] - -[dev-dependencies] -console_log = "0.2.0" -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } -wasm-bindgen-test = "0.3.19" -wasm-bindgen-futures = "0.4.19" diff --git a/kvdb-web/src/error.rs b/kvdb-web/src/error.rs deleted file mode 100644 index e88d44636..000000000 --- a/kvdb-web/src/error.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Errors that can occur when working with IndexedDB. - -use std::fmt; - -/// An error that occurred when working with IndexedDB. -#[derive(Clone, PartialEq, Debug)] -pub enum Error { - /// Accessing a Window has failed. - /// Are we in a WebWorker? - WindowNotAvailable, - /// IndexedDB is not supported by your browser. - NotSupported(String), - /// This enum may grow additional variants, - /// so this makes sure clients don't count on exhaustive matching. - /// (Otherwise, adding a new variant could break existing code.) 
- #[doc(hidden)] - __Nonexhaustive, -} - -impl std::error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::WindowNotAvailable => "Accessing a Window has failed", - Error::NotSupported(_) => "IndexedDB is not supported by your browser", - Error::__Nonexhaustive => unreachable!(), - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Error::WindowNotAvailable => write!(f, "Accessing a Window has failed"), - Error::NotSupported(ref err) => write!(f, "IndexedDB is not supported by your browser: {}", err,), - Error::__Nonexhaustive => unreachable!(), - } - } -} diff --git a/kvdb-web/src/indexed_db.rs b/kvdb-web/src/indexed_db.rs deleted file mode 100644 index 15a7713e6..000000000 --- a/kvdb-web/src/indexed_db.rs +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Utility functions to interact with IndexedDB browser API. - -use js_sys::{Array, ArrayBuffer, Uint8Array}; -use wasm_bindgen::{closure::Closure, JsCast, JsValue}; -use web_sys::{Event, IdbCursorWithValue, IdbDatabase, IdbKeyRange, IdbOpenDbRequest, IdbRequest, IdbTransactionMode}; - -use futures::channel; -use futures::prelude::*; - -use kvdb::{DBOp, DBTransaction}; - -use log::{debug, warn}; -use std::ops::Deref; - -use crate::error::Error; - -pub struct IndexedDB { - pub version: u32, - pub columns: u32, - pub inner: super::SendWrapper, -} - -/// Opens the IndexedDB with the given name, version and the specified number of columns -/// (including the default one). 
-pub fn open(name: &str, version: Option, columns: u32) -> impl Future> { - let (tx, rx) = channel::oneshot::channel::(); - - let window = match web_sys::window() { - Some(window) => window, - None => return future::Either::Right(future::err(Error::WindowNotAvailable)), - }; - let idb_factory = window.indexed_db(); - - let idb_factory = match idb_factory { - Ok(idb_factory) => idb_factory.expect("We can't get a null pointer back; qed"), - Err(err) => return future::Either::Right(future::err(Error::NotSupported(format!("{:?}", err)))), - }; - - let open_request = match version { - Some(version) => idb_factory.open_with_u32(name, version).expect("TypeError is not possible with Rust; qed"), - None => idb_factory.open(name).expect("TypeError is not possible with Rust; qed"), - }; - - try_create_missing_stores(&open_request, columns, version); - - let on_success = Closure::once(move |event: &Event| { - // Extract database handle from the event - let target = event.target().expect("Event should have a target; qed"); - let req = target.dyn_ref::().expect("Event target is IdbRequest; qed"); - - let result = req.result().expect("IndexedDB.onsuccess should have a valid result; qed"); - assert!(result.is_instance_of::()); - - let db = IdbDatabase::from(result); - // JS returns version as f64 - let version = db.version().round() as u32; - let columns = db.object_store_names().length(); - - // errors if the receiving end was dropped before this call - let _ = tx.send(IndexedDB { version, columns, inner: super::SendWrapper::new(db) }); - }); - open_request.set_onsuccess(Some(on_success.as_ref().unchecked_ref())); - on_success.forget(); - - future::Either::Left(rx.then(|r| future::ok(r.expect("Sender isn't dropped; qed")))) -} - -fn store_name(num: u32) -> String { - format!("col{}", num) -} - -// Returns js objects representing store names for each column -fn store_names_js(columns: u32) -> Array { - let column_names = (0..columns).map(store_name); - - let js_array = 
Array::new(); - for name in column_names { - js_array.push(&JsValue::from(name)); - } - - js_array -} - -fn try_create_missing_stores(req: &IdbOpenDbRequest, columns: u32, version: Option) { - let on_upgradeneeded = Closure::once(move |event: &Event| { - debug!("Upgrading or creating the database to version {:?}, columns {}", version, columns); - // Extract database handle from the event - let target = event.target().expect("Event should have a target; qed"); - let req = target.dyn_ref::().expect("Event target is IdbRequest; qed"); - let result = req.result().expect("IdbRequest should have a result; qed"); - let db: &IdbDatabase = result.unchecked_ref(); - - let previous_columns = db.object_store_names().length(); - debug!("Previous version: {}, columns {}", db.version(), previous_columns); - - for name in (previous_columns..=columns).map(store_name) { - let res = db.create_object_store(name.as_str()); - if let Err(err) = res { - debug!("error creating object store {}: {:?}", name, err); - } - } - }); - - req.set_onupgradeneeded(Some(on_upgradeneeded.as_ref().unchecked_ref())); - on_upgradeneeded.forget(); -} - -/// Commit a transaction to the IndexedDB. 
-pub fn idb_commit_transaction(idb: &IdbDatabase, txn: &DBTransaction, columns: u32) -> impl Future { - let store_names_js = store_names_js(columns); - - // Create a transaction - let mode = IdbTransactionMode::Readwrite; - let idb_txn = idb - .transaction_with_str_sequence_and_mode(&store_names_js, mode) - .expect("The provided mode and store names are valid; qed"); - - // Open object stores (columns) - let object_stores = (0..columns) - .map(|n| { - idb_txn - .object_store(store_name(n).as_str()) - .expect("Object stores were created in try_create_object_stores; qed") - }) - .collect::>(); - - for op in &txn.ops { - match op { - DBOp::Insert { col, key, value } => { - let column = *col as usize; - // Convert rust bytes to js arrays - let key_js = Uint8Array::from(key.as_ref()); - let val_js = Uint8Array::from(value.as_ref()); - - // Insert key/value pair into the object store - let res = object_stores[column].put_with_key(val_js.as_ref(), key_js.as_ref()); - if let Err(err) = res { - warn!("error inserting key/values into col_{}: {:?}", column, err); - } - } - DBOp::Delete { col, key } => { - let column = *col as usize; - // Convert rust bytes to js arrays - let key_js = Uint8Array::from(key.as_ref()); - - // Delete key/value pair from the object store - let res = object_stores[column].delete(key_js.as_ref()); - if let Err(err) = res { - warn!("error deleting key from col_{}: {:?}", column, err); - } - } - DBOp::DeletePrefix { col, prefix } => { - let column = *col as usize; - // Convert rust bytes to js arrays - let prefix_js_start = Uint8Array::from(prefix.as_ref()); - let prefix_js_end = Uint8Array::from(prefix.as_ref()); - - let range = IdbKeyRange::bound(prefix_js_start.as_ref(), prefix_js_end.as_ref()) - .expect("Starting and ending at same value is valid bound; qed"); - let res = object_stores[column].delete(range.as_ref()); - if let Err(err) = res { - warn!("error deleting prefix from col_{}: {:?}", column, err); - } - } - } - } - - let (tx, rx) = 
channel::oneshot::channel::<()>(); - - let on_complete = Closure::once(move || { - let _ = tx.send(()); - }); - idb_txn.set_oncomplete(Some(on_complete.as_ref().unchecked_ref())); - on_complete.forget(); - - let on_error = Closure::once(move || { - warn!("Failed to commit a transaction to IndexedDB"); - }); - idb_txn.set_onerror(Some(on_error.as_ref().unchecked_ref())); - on_error.forget(); - - rx.map(|_| ()) -} - -/// Returns a cursor to a database column with the given column number. -pub fn idb_cursor(idb: &IdbDatabase, col: u32) -> impl Stream, Vec)> { - // TODO: we could read all the columns in one db transaction - let store_name = store_name(col); - let store_name = store_name.as_str(); - let txn = idb.transaction_with_str(store_name).expect("The stores were created on open: {}; qed"); - - let store = txn.object_store(store_name).expect("Opening a store shouldn't fail; qed"); - let cursor = store.open_cursor().expect("Opening a cursor shouldn't fail; qed"); - - let (tx, rx) = channel::mpsc::unbounded(); - - let on_cursor = Closure::wrap(Box::new(move |event: &Event| { - // Extract the cursor from the event - let target = event.target().expect("on_cursor should have a target; qed"); - let req = target.dyn_ref::().expect("target should be IdbRequest; qed"); - let result = req.result().expect("IdbRequest should have a result; qed"); - let cursor: &IdbCursorWithValue = result.unchecked_ref(); - - if let (Ok(key), Ok(value)) = (cursor.deref().key(), cursor.value()) { - let k: &ArrayBuffer = key.unchecked_ref(); - let v: &Uint8Array = value.unchecked_ref(); - - // Copy js arrays into rust `Vec`s - let mut kv = vec![0u8; k.byte_length() as usize]; - let mut vv = vec![0u8; v.byte_length() as usize]; - Uint8Array::new(k).copy_to(&mut kv[..]); - v.copy_to(&mut vv[..]); - - if let Err(e) = tx.unbounded_send((kv, vv)) { - warn!("on_cursor: error sending to a channel {:?}", e); - } - if let Err(e) = cursor.deref().continue_() { - warn!("cursor advancement has failed 
{:?}", e); - } - } else { - // we're done - tx.close_channel(); - } - }) as Box); - - cursor.set_onsuccess(Some(on_cursor.as_ref().unchecked_ref())); - on_cursor.forget(); - - rx -} diff --git a/kvdb-web/src/lib.rs b/kvdb-web/src/lib.rs deleted file mode 100644 index 49540e35e..000000000 --- a/kvdb-web/src/lib.rs +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A key-value database for use in browsers -//! -//! Writes data both into memory and IndexedDB, reads the whole database in memory -//! from the IndexedDB on `open`. - -#![deny(missing_docs)] - -mod error; -mod indexed_db; - -use kvdb::{DBTransaction, DBValue}; -use kvdb_memorydb::{self as in_memory, InMemory}; -use send_wrapper::SendWrapper; -use std::io; - -pub use error::Error; -pub use kvdb::KeyValueDB; - -use futures::prelude::*; - -use web_sys::IdbDatabase; - -/// Database backed by both IndexedDB and in memory implementation. -pub struct Database { - name: String, - version: u32, - columns: u32, - in_memory: InMemory, - indexed_db: SendWrapper, -} - -// TODO: implement when web-based implementation need memory stats -parity_util_mem::malloc_size_of_is_0!(Database); - -impl Database { - /// Opens the database with the given name, - /// and the specified number of columns (not including the default one). - pub async fn open(name: String, columns: u32) -> Result { - let name_clone = name.clone(); - // let's try to open the latest version of the db first - let db = indexed_db::open(name.as_str(), None, columns).await?; - - // If we need more column than the latest version has, - // then bump the version (+ 1 for the default column). - // In order to bump the version, we close the database - // and reopen it with a higher version than it was opened with previously. - // cf. 
https://github.com/paritytech/parity-common/pull/202#discussion_r321221751 - let db = if columns + 1 > db.columns { - let next_version = db.version + 1; - drop(db); - indexed_db::open(name.as_str(), Some(next_version), columns).await? - } else { - db - }; - // populate the in_memory db from the IndexedDB - let indexed_db::IndexedDB { version, inner, .. } = db; - let in_memory = in_memory::create(columns); - // read the columns from the IndexedDB - for column in 0..columns { - let mut txn = DBTransaction::new(); - let mut stream = indexed_db::idb_cursor(&*inner, column); - while let Some((key, value)) = stream.next().await { - txn.put_vec(column, key.as_ref(), value); - } - // write each column into memory - in_memory.write(txn).expect("writing in memory always succeeds; qed"); - } - Ok(Database { name: name_clone, version, columns, in_memory, indexed_db: inner }) - } - - /// Get the database name. - pub fn name(&self) -> &str { - self.name.as_str() - } - - /// Get the database version. - pub fn version(&self) -> u32 { - self.version - } -} - -impl Drop for Database { - fn drop(&mut self) { - self.indexed_db.close(); - } -} - -impl KeyValueDB for Database { - fn get(&self, col: u32, key: &[u8]) -> io::Result> { - self.in_memory.get(col, key) - } - - fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { - self.in_memory.get_by_prefix(col, prefix) - } - - fn write(&self, transaction: DBTransaction) -> io::Result<()> { - let _ = indexed_db::idb_commit_transaction(&*self.indexed_db, &transaction, self.columns); - self.in_memory.write(transaction) - } - - // NOTE: clones the whole db - fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a> { - self.in_memory.iter(col) - } - - // NOTE: clones the whole db - fn iter_with_prefix<'a>( - &'a self, - col: u32, - prefix: &'a [u8], - ) -> Box, Box<[u8]>)> + 'a> { - self.in_memory.iter_with_prefix(col, prefix) - } - - // NOTE: not supported - fn restore(&self, _new_db: &str) -> std::io::Result<()> { - 
Err(io::Error::new(io::ErrorKind::Other, "Not supported yet")) - } -} diff --git a/kvdb-web/tests/indexed_db.rs b/kvdb-web/tests/indexed_db.rs deleted file mode 100644 index 2becc48f4..000000000 --- a/kvdb-web/tests/indexed_db.rs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! IndexedDB tests. - -use futures::future::TryFutureExt as _; - -use kvdb_shared_tests as st; -use kvdb_web::{Database, KeyValueDB as _}; - -use wasm_bindgen_test::*; - -wasm_bindgen_test_configure!(run_in_browser); - -async fn open_db(col: u32, name: &str) -> Database { - Database::open(name.into(), col).unwrap_or_else(|err| panic!("{}", err)).await -} - -#[wasm_bindgen_test] -async fn get_fails_with_non_existing_column() { - let db = open_db(1, "get_fails_with_non_existing_column").await; - st::test_get_fails_with_non_existing_column(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn put_and_get() { - let db = open_db(1, "put_and_get").await; - st::test_put_and_get(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn delete_and_get() { - let db = open_db(1, "delete_and_get").await; - st::test_delete_and_get(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn delete_prefix() { - let db = open_db(st::DELETE_PREFIX_NUM_COLUMNS, "delete_prefix").await; - st::test_delete_prefix(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn iter() { - let db = open_db(1, "iter").await; - st::test_iter(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn iter_with_prefix() { - let db = open_db(1, "iter_with_prefix").await; - st::test_iter_with_prefix(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn complex() { - let db = open_db(1, "complex").await; - st::test_complex(&db).unwrap() -} - -#[wasm_bindgen_test] -async fn reopen_the_database_with_more_columns() { - let _ = 
console_log::init_with_level(log::Level::Trace); - - let db = open_db(1, "reopen_the_database_with_more_columns").await; - - // Write a value into the database - let mut batch = db.transaction(); - batch.put(0, b"hello", b"world"); - db.write(batch).unwrap(); - - assert_eq!(db.get(0, b"hello").unwrap().unwrap(), b"world"); - - // Check the database version - assert_eq!(db.version(), 1); - - // Close the database - drop(db); - - // Reopen it again with 3 columns - let db = open_db(3, "reopen_the_database_with_more_columns").await; - - // The value should still be present - assert_eq!(db.get(0, b"hello").unwrap().unwrap(), b"world"); - assert!(db.get(0, b"trash").unwrap().is_none()); - - // The version should be bumped - assert_eq!(db.version(), 2); -} diff --git a/parity-path/CHANGELOG.md b/parity-path/CHANGELOG.md deleted file mode 100644 index ed9aa8162..000000000 --- a/parity-path/CHANGELOG.md +++ /dev/null @@ -1,10 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [0.1.3] - 2020-03-16 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/parity-path/Cargo.toml b/parity-path/Cargo.toml deleted file mode 100644 index d23442c8f..000000000 --- a/parity-path/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "parity-path" -version = "0.1.3" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "Path utilities" -license = "MIT OR Apache-2.0" -edition = "2018" - -[dependencies] -home = "0.5.1" diff --git a/parity-path/src/lib.rs b/parity-path/src/lib.rs deleted file mode 100644 index 997a040be..000000000 --- a/parity-path/src/lib.rs +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! Path utilities -use std::path::Path; -use std::path::PathBuf; - -use home::home_dir; - -#[cfg(target_os = "macos")] -/// Get the config path for application `name`. -/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. -pub fn config_path(name: &str) -> PathBuf { - let mut home = home_dir().expect("Failed to get home dir"); - home.push("Library"); - home.push(name); - home -} - -#[cfg(windows)] -/// Get the config path for application `name`. -/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. -pub fn config_path(name: &str) -> PathBuf { - let mut home = home_dir().expect("Failed to get home dir"); - home.push("AppData"); - home.push("Roaming"); - home.push(name); - home -} - -#[cfg(not(any(target_os = "macos", windows)))] -/// Get the config path for application `name`. -/// `name` should be capitalized, e.g. `"Ethereum"`, `"Parity"`. -pub fn config_path(name: &str) -> PathBuf { - let mut home = home_dir().expect("Failed to get home dir"); - home.push(format!(".{}", name.to_lowercase())); - home -} - -/// Get the specific folder inside a config path. 
-pub fn config_path_with(name: &str, then: &str) -> PathBuf { - let mut path = config_path(name); - path.push(then); - path -} - -/// Default ethereum paths -pub mod ethereum { - use std::path::PathBuf; - - /// Default path for ethereum installation on Mac Os - pub fn default() -> PathBuf { - super::config_path("Ethereum") - } - - /// Default path for ethereum installation (testnet) - pub fn test() -> PathBuf { - let mut path = default(); - path.push("testnet"); - path - } - - /// Get the specific folder inside default ethereum installation - pub fn with_default(s: &str) -> PathBuf { - let mut path = default(); - path.push(s); - path - } - - /// Get the specific folder inside default ethereum installation configured for testnet - pub fn with_testnet(s: &str) -> PathBuf { - let mut path = default(); - path.push("testnet"); - path.push(s); - path - } -} - -/// Restricts the permissions of given path only to the owner. -#[cfg(unix)] -pub fn restrict_permissions_owner(file_path: &Path, write: bool, executable: bool) -> Result<(), String> { - let perms = - ::std::os::unix::fs::PermissionsExt::from_mode(0o400 + write as u32 * 0o200 + executable as u32 * 0o100); - ::std::fs::set_permissions(file_path, perms).map_err(|e| format!("{:?}", e)) -} - -/// Restricts the permissions of given path only to the owner. -#[cfg(not(unix))] -pub fn restrict_permissions_owner(_file_path: &Path, _write: bool, _executable: bool) -> Result<(), String> { - //TODO: implement me - Ok(()) -} diff --git a/plain_hasher/CHANGELOG.md b/plain_hasher/CHANGELOG.md deleted file mode 100644 index 09fa2352d..000000000 --- a/plain_hasher/CHANGELOG.md +++ /dev/null @@ -1,19 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [0.3.0] - 2020-07-27 -- Add support for big-endian platforms. [#407](https://github.com/paritytech/parity-common/pull/407) -- Remove unsafe code and `std` feature. 
[#410](https://github.com/paritytech/parity-common/pull/410) - -## [0.2.3] - 2020-03-16 -- License changed from MIT to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - -## [0.2.2] - 2019-10-24 -- Migrated to 2018 edition. [#213](https://github.com/paritytech/parity-common/pull/213) -### Dependencies -- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/plain_hasher/Cargo.toml b/plain_hasher/Cargo.toml deleted file mode 100644 index f0c51c7c5..000000000 --- a/plain_hasher/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "plain_hasher" -description = "Hasher for 32-byte keys." -version = "0.3.0" -authors = ["Parity Technologies "] -license = "MIT OR Apache-2.0" -keywords = ["hash", "hasher"] -homepage = "https://github.com/paritytech/parity-common" -categories = ["no-std"] -edition = "2018" - -[dependencies] -crunchy = { version = "0.2.2", default-features = false } - -[dev-dependencies] -criterion = "0.3.0" - -[features] - -[[bench]] -name = "bench" -harness = false diff --git a/plain_hasher/README.md b/plain_hasher/README.md deleted file mode 100644 index ec5082999..000000000 --- a/plain_hasher/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Specialized Hasher for 32-byte keys - -Provides `PlainHasher`, a specialized `core::hash::Hasher` that takes just 8 bytes of the provided value and may only be used for keys which are 32 bytes. - -The crate is `no_std`-compatible. diff --git a/plain_hasher/benches/bench.rs b/plain_hasher/benches/bench.rs deleted file mode 100644 index 8e496d50d..000000000 --- a/plain_hasher/benches/bench.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::collections::hash_map::DefaultHasher; -use std::hash::Hasher; - -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use plain_hasher::PlainHasher; - -fn bench_write_hasher(c: &mut Criterion) { - c.bench_function("write_plain_hasher", |b| { - b.iter(|| { - (0..100u8).fold(PlainHasher::default(), |mut old, new| { - let bb = black_box([new; 32]); - old.write(&bb); - old - }); - }) - }); - c.bench_function("write_default_hasher", |b| { - b.iter(|| { - (0..100u8).fold(DefaultHasher::default(), |mut old, new| { - let bb = black_box([new; 32]); - old.write(&bb); - old - }); - }) - }); -} - -criterion_group!(benches, bench_write_hasher); -criterion_main!(benches); diff --git a/plain_hasher/src/lib.rs b/plain_hasher/src/lib.rs deleted file mode 100644 index 6084b449f..000000000 --- a/plain_hasher/src/lib.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![no_std] - -use core::hash::Hasher; - -use crunchy::unroll; - -/// Hasher that just takes 8 bytes of the provided value. -/// May only be used for keys which are 32 bytes. -#[derive(Default)] -pub struct PlainHasher { - prefix: u64, -} - -impl Hasher for PlainHasher { - #[inline] - fn finish(&self) -> u64 { - self.prefix - } - - #[inline] - fn write(&mut self, bytes: &[u8]) { - debug_assert!(bytes.len() == 32); - let mut prefix_bytes = self.prefix.to_le_bytes(); - - unroll! 
{ - for i in 0..8 { - prefix_bytes[i] ^= (bytes[i] ^ bytes[i + 8]) ^ (bytes[i + 16] ^ bytes[i + 24]); - } - } - - self.prefix = u64::from_le_bytes(prefix_bytes); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_works() { - let mut bytes = [32u8; 32]; - bytes[0] = 15; - let mut hasher = PlainHasher::default(); - hasher.write(&bytes); - assert_eq!(hasher.prefix, 47); - } -} diff --git a/trace-time/CHANGELOG.md b/trace-time/CHANGELOG.md deleted file mode 100644 index 0f666b438..000000000 --- a/trace-time/CHANGELOG.md +++ /dev/null @@ -1,15 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [0.1.3] - 2020-03-16 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - -## [0.1.2] - 2019-10-24 -- Migrated to 2018 edition. [#232](https://github.com/paritytech/parity-common/pull/232) -### Dependencies -- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/trace-time/Cargo.toml b/trace-time/Cargo.toml deleted file mode 100644 index 81877e186..000000000 --- a/trace-time/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "trace-time" -description = "Easily trace time to execute a scope." -version = "0.1.3" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -license = "MIT OR Apache-2.0" -edition = "2018" - -[dependencies] -log = "0.4.8" diff --git a/trace-time/src/lib.rs b/trace-time/src/lib.rs deleted file mode 100644 index e5ecf2d09..000000000 --- a/trace-time/src/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
Performance timer with logging - -use log::trace; -use std::time::Instant; - -#[macro_export] -macro_rules! trace_time { - ($name: expr) => { - let _timer = $crate::PerfTimer::new($name); - }; -} - -/// Performance timer with logging. Starts measuring time in the constructor, prints -/// elapsed time in the destructor or when `stop` is called. -pub struct PerfTimer { - name: &'static str, - start: Instant, -} - -impl PerfTimer { - /// Create an instance with given name. - pub fn new(name: &'static str) -> PerfTimer { - PerfTimer { name, start: Instant::now() } - } -} - -impl Drop for PerfTimer { - fn drop(&mut self) { - let elapsed = self.start.elapsed(); - let ms = elapsed.as_millis(); - trace!(target: "perf", "{}: {:.2}ms", self.name, ms); - } -} diff --git a/transaction-pool/CHANGELOG.md b/transaction-pool/CHANGELOG.md deleted file mode 100644 index 334625f2d..000000000 --- a/transaction-pool/CHANGELOG.md +++ /dev/null @@ -1,15 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [2.0.3] - 2020-03-16 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) - -## [2.0.2] - 2019-10-24 -- Updated to 2018 edition idioms. [#237](https://github.com/paritytech/parity-common/pull/237) -### Dependencies -- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml deleted file mode 100644 index cdf002afc..000000000 --- a/transaction-pool/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -description = "Generic transaction pool." 
-name = "transaction-pool" -version = "2.0.3" -license = "MIT OR Apache-2.0" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -edition = "2018" - -[dependencies] -log = "0.4.8" -smallvec = "1.6.0" - -[dev-dependencies] -ethereum-types = { version = "0.12.0", path = "../ethereum-types" } diff --git a/transaction-pool/src/error.rs b/transaction-pool/src/error.rs deleted file mode 100644 index 20935c23b..000000000 --- a/transaction-pool/src/error.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::{error, fmt, result}; - -/// Transaction Pool Error -#[derive(Debug)] -pub enum Error { - /// Transaction is already imported - AlreadyImported(Hash), - /// Transaction is too cheap to enter the queue - TooCheapToEnter(Hash, String), - /// Transaction is too cheap to replace existing transaction that occupies the same slot. - TooCheapToReplace(Hash, Hash), -} - -/// Transaction Pool Result -pub type Result = result::Result>; - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::AlreadyImported(h) => write!(f, "[{:?}] already imported", h), - Error::TooCheapToEnter(hash, min_score) => { - write!(f, "[{:x}] too cheap to enter the pool. 
Min score: {}", hash, min_score) - } - Error::TooCheapToReplace(old_hash, hash) => write!(f, "[{:x}] too cheap to replace: {:x}", hash, old_hash), - } - } -} - -impl error::Error for Error {} - -#[cfg(test)] -impl PartialEq for Error -where - H: PartialEq, -{ - fn eq(&self, other: &Self) -> bool { - use self::Error::*; - - match (self, other) { - (&AlreadyImported(ref h1), &AlreadyImported(ref h2)) => h1 == h2, - (&TooCheapToEnter(ref h1, ref s1), &TooCheapToEnter(ref h2, ref s2)) => h1 == h2 && s1 == s2, - (&TooCheapToReplace(ref old1, ref new1), &TooCheapToReplace(ref old2, ref new2)) => { - old1 == old2 && new1 == new2 - } - _ => false, - } - } -} diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs deleted file mode 100644 index dd49fb3a8..000000000 --- a/transaction-pool/src/lib.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Generic Transaction Pool -//! -//! An extensible and performant implementation of Ethereum Transaction Pool. -//! The pool stores ordered, verified transactions according to some pluggable -//! `Scoring` implementation. -//! The pool also allows you to construct a set of `pending` transactions according -//! to some notion of `Readiness` (pluggable). -//! -//! The pool is generic over transactions and should make no assumptions about them. -//! The only thing we can rely on is the `Scoring` that defines: -//! - the ordering of transactions from a single sender -//! - the priority of the transaction compared to other transactions from different senders -//! -//! NOTE: the transactions from a single sender are not ordered by priority, -//! but still when constructing pending set we always need to maintain the ordering -//! (i.e. 
`txs[1]` always needs to be included after `txs[0]` even if it has higher priority) -//! -//! ### Design Details -//! -//! Performance assumptions: -//! - Possibility to handle tens of thousands of transactions -//! - Fast insertions and replacements `O(per-sender + log(senders))` -//! - Reasonably fast removal of stalled transactions `O(per-sender)` -//! - Reasonably fast construction of pending set `O(txs * (log(senders) + log(per-sender))` -//! -//! The removal performance could be improved by trading some memory. Currently `SmallVec` is used -//! to store senders transactions, instead we could use `VecDeque` and efficiently `pop_front` -//! the best transactions. -//! -//! The pending set construction and insertion complexity could be reduced by introducing -//! a notion of `nonce` - an absolute, numeric ordering of transactions. -//! We don't do that because of possible implications of EIP208 where nonce might not be -//! explicitly available. -//! -//! 1. The pool groups transactions from particular sender together -//! and stores them ordered by `Scoring` within that group -//! i.e. `HashMap>`. -//! 2. Additionally we maintain the best and the worst transaction from each sender -//! (by `Scoring` not `priority`) ordered by `priority`. -//! It means that we can easily identify the best transaction inside the entire pool -//! and the worst transaction. -//! 3. Whenever new transaction is inserted to the queue: -//! - first check all the limits (overall, memory, per-sender) -//! - retrieve all transactions from a sender -//! - binary search for position to insert the transaction -//! - decide if we are replacing existing transaction (3 outcomes: drop, replace, insert) -//! - update best and worst transaction from that sender if affected -//! 4. Pending List construction: -//! - Take the best transaction (by priority) from all senders to the List -//! - Replace the transaction with next transaction (by ordering) from that sender (if any) -//! 
- Repeat - -#![warn(missing_docs)] - -#[cfg(test)] -mod tests; - -mod error; -mod listener; -mod options; -mod pool; -mod ready; -mod replace; -mod status; -mod transactions; -mod verifier; - -pub mod scoring; - -pub use self::error::Error; -pub use self::listener::{Listener, NoopListener}; -pub use self::options::Options; -pub use self::pool::{PendingIterator, Pool, Transaction, UnorderedIterator}; -pub use self::ready::{Readiness, Ready}; -pub use self::replace::{ReplaceTransaction, ShouldReplace}; -pub use self::scoring::Scoring; -pub use self::status::{LightStatus, Status}; -pub use self::verifier::Verifier; - -use std::fmt; -use std::hash::Hash; - -/// Already verified transaction that can be safely queued. -pub trait VerifiedTransaction: fmt::Debug { - /// Transaction hash type. - type Hash: fmt::Debug + fmt::LowerHex + Eq + Clone + Hash; - - /// Transaction sender type. - type Sender: fmt::Debug + Eq + Clone + Hash + Send; - - /// Transaction hash - fn hash(&self) -> &Self::Hash; - - /// Memory usage - fn mem_usage(&self) -> usize; - - /// Transaction sender - fn sender(&self) -> &Self::Sender; -} diff --git a/transaction-pool/src/listener.rs b/transaction-pool/src/listener.rs deleted file mode 100644 index 5a3f1a0c7..000000000 --- a/transaction-pool/src/listener.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use crate::error::Error; -use std::{ - fmt::{Debug, LowerHex}, - sync::Arc, -}; - -/// Transaction pool listener. -/// -/// Listener is being notified about status of every transaction in the pool. -pub trait Listener { - /// The transaction has been successfully added to the pool. - /// If second argument is `Some` the transaction has took place of some other transaction - /// which was already in pool. 
- /// NOTE: You won't be notified about drop of `old` transaction separately. - fn added(&mut self, _tx: &Arc, _old: Option<&Arc>) {} - - /// The transaction was rejected from the pool. - /// It means that it was too cheap to replace any transaction already in the pool. - fn rejected(&mut self, _tx: &Arc, _reason: &Error) {} - - /// The transaction was pushed out from the pool because of the limit. - fn dropped(&mut self, _tx: &Arc, _by: Option<&T>) {} - - /// The transaction was marked as invalid by executor. - fn invalid(&mut self, _tx: &Arc) {} - - /// The transaction has been canceled. - fn canceled(&mut self, _tx: &Arc) {} - - /// The transaction has been culled from the pool. - fn culled(&mut self, _tx: &Arc) {} -} - -/// A no-op implementation of `Listener`. -#[derive(Debug)] -pub struct NoopListener; -impl Listener for NoopListener {} - -impl Listener for (A, B) -where - A: Listener, - B: Listener, -{ - fn added(&mut self, tx: &Arc, old: Option<&Arc>) { - self.0.added(tx, old); - self.1.added(tx, old); - } - - fn rejected(&mut self, tx: &Arc, reason: &Error) { - self.0.rejected(tx, reason); - self.1.rejected(tx, reason); - } - - fn dropped(&mut self, tx: &Arc, by: Option<&T>) { - self.0.dropped(tx, by); - self.1.dropped(tx, by); - } - - fn invalid(&mut self, tx: &Arc) { - self.0.invalid(tx); - self.1.invalid(tx); - } - - fn canceled(&mut self, tx: &Arc) { - self.0.canceled(tx); - self.1.canceled(tx); - } - - fn culled(&mut self, tx: &Arc) { - self.0.culled(tx); - self.1.culled(tx); - } -} diff --git a/transaction-pool/src/options.rs b/transaction-pool/src/options.rs deleted file mode 100644 index 947af30a9..000000000 --- a/transaction-pool/src/options.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// Transaction Pool options. 
-#[derive(Clone, Debug, PartialEq)] -pub struct Options { - /// Maximal number of transactions in the pool. - pub max_count: usize, - /// Maximal number of transactions from single sender. - pub max_per_sender: usize, - /// Maximal memory usage. - pub max_mem_usage: usize, -} - -impl Default for Options { - fn default() -> Self { - Options { max_count: 1024, max_per_sender: 16, max_mem_usage: 8 * 1024 * 1024 } - } -} diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs deleted file mode 100644 index 2eb324020..000000000 --- a/transaction-pool/src/pool.rs +++ /dev/null @@ -1,624 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use log::{trace, warn}; -use std::collections::{hash_map, BTreeSet, HashMap}; -use std::slice; -use std::sync::Arc; - -use crate::{ - error, - listener::{Listener, NoopListener}, - options::Options, - ready::{Readiness, Ready}, - replace::{ReplaceTransaction, ShouldReplace}, - scoring::{self, ScoreWithRef, Scoring}, - status::{LightStatus, Status}, - transactions::{AddResult, Transactions}, - VerifiedTransaction, -}; - -/// Internal representation of transaction. -/// -/// Includes unique insertion id that can be used for scoring explicitly, -/// but internally is used to resolve conflicts in case of equal scoring -/// (newer transactions are preferred). -#[derive(Debug)] -pub struct Transaction { - /// Sequential id of the transaction - pub insertion_id: u64, - /// Shared transaction - pub transaction: Arc, -} - -impl Clone for Transaction { - fn clone(&self) -> Self { - Transaction { insertion_id: self.insertion_id, transaction: self.transaction.clone() } - } -} - -impl ::std::ops::Deref for Transaction { - type Target = Arc; - - fn deref(&self) -> &Self::Target { - &self.transaction - } -} - -/// A transaction pool. 
-#[derive(Debug)] -pub struct Pool, L = NoopListener> { - listener: L, - scoring: S, - options: Options, - mem_usage: usize, - - transactions: HashMap>, - by_hash: HashMap>, - - best_transactions: BTreeSet>, - worst_transactions: BTreeSet>, - - insertion_id: u64, -} - -impl + Default> Default for Pool { - fn default() -> Self { - Self::with_scoring(S::default(), Options::default()) - } -} - -impl + Default> Pool { - /// Creates a new `Pool` with given options - /// and default `Scoring` and `Listener`. - pub fn with_options(options: Options) -> Self { - Self::with_scoring(S::default(), options) - } -} - -impl> Pool { - /// Creates a new `Pool` with given `Scoring` and options. - pub fn with_scoring(scoring: S, options: Options) -> Self { - Self::new(NoopListener, scoring, options) - } -} - -const INITIAL_NUMBER_OF_SENDERS: usize = 16; - -impl Pool -where - T: VerifiedTransaction, - S: Scoring, - L: Listener, -{ - /// Creates new `Pool` with given `Scoring`, `Listener` and options. - pub fn new(listener: L, scoring: S, options: Options) -> Self { - let transactions = HashMap::with_capacity(INITIAL_NUMBER_OF_SENDERS); - let by_hash = HashMap::with_capacity(options.max_count / 16); - - Pool { - listener, - scoring, - options, - mem_usage: 0, - transactions, - by_hash, - best_transactions: Default::default(), - worst_transactions: Default::default(), - insertion_id: 0, - } - } - - /// Attempts to import new transaction to the pool, returns a `Arc` or an `Error`. - /// - /// NOTE: Since `Ready`ness is separate from the pool it's possible to import stalled transactions. - /// It's the caller responsibility to make sure that's not the case. - /// - /// NOTE: The transaction may push out some other transactions from the pool - /// either because of limits (see `Options`) or because `Scoring` decides that the transaction - /// replaces an existing transaction from that sender. 
- /// - /// If any limit is reached the transaction with the lowest `Score` will be compared with the - /// new transaction via the supplied `ShouldReplace` implementation and may be evicted. - /// - /// The `Listener` will be informed on any drops or rejections. - pub fn import(&mut self, transaction: T, replace: &dyn ShouldReplace) -> error::Result, T::Hash> { - let mem_usage = transaction.mem_usage(); - - if self.by_hash.contains_key(transaction.hash()) { - return Err(error::Error::AlreadyImported(transaction.hash().clone())); - } - - self.insertion_id += 1; - let transaction = Transaction { insertion_id: self.insertion_id, transaction: Arc::new(transaction) }; - - // TODO [ToDr] Most likely move this after the transaction is inserted. - // Avoid using should_replace, but rather use scoring for that. - { - let remove_worst = |s: &mut Self, transaction| match s.remove_worst(transaction, replace) { - Err(err) => { - s.listener.rejected(transaction, &err); - Err(err) - } - Ok(None) => Ok(false), - Ok(Some(removed)) => { - s.listener.dropped(&removed, Some(transaction)); - s.finalize_remove(removed.hash()); - Ok(true) - } - }; - - while self.by_hash.len() + 1 > self.options.max_count { - trace!("Count limit reached: {} > {}", self.by_hash.len() + 1, self.options.max_count); - if !remove_worst(self, &transaction)? { - break; - } - } - - while self.mem_usage + mem_usage > self.options.max_mem_usage { - trace!("Mem limit reached: {} > {}", self.mem_usage + mem_usage, self.options.max_mem_usage); - if !remove_worst(self, &transaction)? 
{ - break; - } - } - } - - let (result, prev_state, current_state) = { - let transactions = - self.transactions.entry(transaction.sender().clone()).or_insert_with(Transactions::default); - // get worst and best transactions for comparison - let prev = transactions.worst_and_best(); - let result = transactions.add(transaction, &self.scoring, self.options.max_per_sender); - let current = transactions.worst_and_best(); - (result, prev, current) - }; - - // update best and worst transactions from this sender (if required) - self.update_senders_worst_and_best(prev_state, current_state); - - match result { - AddResult::Ok(tx) => { - self.listener.added(&tx, None); - self.finalize_insert(&tx, None); - Ok(tx.transaction) - } - AddResult::PushedOut { new, old } | AddResult::Replaced { new, old } => { - self.listener.added(&new, Some(&old)); - self.finalize_insert(&new, Some(&old)); - Ok(new.transaction) - } - AddResult::TooCheap { new, old } => { - let error = error::Error::TooCheapToReplace(old.hash().clone(), new.hash().clone()); - self.listener.rejected(&new, &error); - return Err(error); - } - AddResult::TooCheapToEnter(new, score) => { - let error = error::Error::TooCheapToEnter(new.hash().clone(), format!("{:#x}", score)); - self.listener.rejected(&new, &error); - return Err(error); - } - } - } - - /// Updates state of the pool statistics if the transaction was added to a set. - fn finalize_insert(&mut self, new: &Transaction, old: Option<&Transaction>) { - self.mem_usage += new.mem_usage(); - self.by_hash.insert(new.hash().clone(), new.clone()); - - if let Some(old) = old { - self.finalize_remove(old.hash()); - } - } - - /// Updates the pool statistics if transaction was removed. - fn finalize_remove(&mut self, hash: &T::Hash) -> Option> { - self.by_hash.remove(hash).map(|old| { - self.mem_usage -= old.transaction.mem_usage(); - old.transaction - }) - } - - /// Updates best and worst transactions from a sender. 
- fn update_senders_worst_and_best( - &mut self, - previous: Option<((S::Score, Transaction), (S::Score, Transaction))>, - current: Option<((S::Score, Transaction), (S::Score, Transaction))>, - ) { - let worst_collection = &mut self.worst_transactions; - let best_collection = &mut self.best_transactions; - - let is_same = - |a: &(S::Score, Transaction), b: &(S::Score, Transaction)| a.0 == b.0 && a.1.hash() == b.1.hash(); - - let update = |collection: &mut BTreeSet<_>, (score, tx), remove| { - if remove { - collection.remove(&ScoreWithRef::new(score, tx)); - } else { - collection.insert(ScoreWithRef::new(score, tx)); - } - }; - - match (previous, current) { - (None, Some((worst, best))) => { - update(worst_collection, worst, false); - update(best_collection, best, false); - } - (Some((worst, best)), None) => { - // all transactions from that sender has been removed. - // We can clear a hashmap entry. - self.transactions.remove(worst.1.sender()); - update(worst_collection, worst, true); - update(best_collection, best, true); - } - (Some((w1, b1)), Some((w2, b2))) => { - if !is_same(&w1, &w2) { - update(worst_collection, w1, true); - update(worst_collection, w2, false); - } - if !is_same(&b1, &b2) { - update(best_collection, b1, true); - update(best_collection, b2, false); - } - } - (None, None) => {} - } - } - - /// Attempts to remove the worst transaction from the pool if it's worse than the given one. - /// - /// Returns `None` in case we couldn't decide if the transaction should replace the worst transaction or not. - /// In such case we will accept the transaction even though it is going to exceed the limit. - fn remove_worst( - &mut self, - transaction: &Transaction, - replace: &dyn ShouldReplace, - ) -> error::Result>, T::Hash> { - let to_remove = match self.worst_transactions.iter().next_back() { - // No elements to remove? and the pool is still full? 
- None => { - warn!("The pool is full but there are no transactions to remove."); - return Err(error::Error::TooCheapToEnter(transaction.hash().clone(), "unknown".into())); - } - Some(old) => { - let txs = &self.transactions; - let get_replace_tx = |tx| { - let sender_txs = txs.get(transaction.sender()).map(|txs| txs.iter().as_slice()); - ReplaceTransaction::new(tx, sender_txs) - }; - let old_replace = get_replace_tx(&old.transaction); - let new_replace = get_replace_tx(transaction); - - match replace.should_replace(&old_replace, &new_replace) { - // We can't decide which of them should be removed, so accept both. - scoring::Choice::InsertNew => None, - // New transaction is better than the worst one so we can replace it. - scoring::Choice::ReplaceOld => Some(old.clone()), - // otherwise fail - scoring::Choice::RejectNew => { - return Err(error::Error::TooCheapToEnter( - transaction.hash().clone(), - format!("{:#x}", old.score), - )) - } - } - } - }; - - if let Some(to_remove) = to_remove { - // Remove from transaction set - self.remove_from_set(to_remove.transaction.sender(), |set, scoring| { - set.remove(&to_remove.transaction, scoring) - }); - - Ok(Some(to_remove.transaction)) - } else { - Ok(None) - } - } - - /// Removes transaction from sender's transaction `HashMap`. - fn remove_from_set, &S) -> R>( - &mut self, - sender: &T::Sender, - f: F, - ) -> Option { - let (prev, next, result) = if let Some(set) = self.transactions.get_mut(sender) { - let prev = set.worst_and_best(); - let result = f(set, &self.scoring); - (prev, set.worst_and_best(), result) - } else { - return None; - }; - - self.update_senders_worst_and_best(prev, next); - Some(result) - } - - /// Clears pool from all transactions. - /// This causes a listener notification that all transactions were dropped. - /// NOTE: the drop-notification order will be arbitrary. 
- pub fn clear(&mut self) { - self.mem_usage = 0; - self.transactions.clear(); - self.best_transactions.clear(); - self.worst_transactions.clear(); - - for (_hash, tx) in self.by_hash.drain() { - self.listener.dropped(&tx.transaction, None) - } - } - - /// Removes single transaction from the pool. - /// Depending on the `is_invalid` flag the listener - /// will either get a `cancelled` or `invalid` notification. - pub fn remove(&mut self, hash: &T::Hash, is_invalid: bool) -> Option> { - if let Some(tx) = self.finalize_remove(hash) { - self.remove_from_set(tx.sender(), |set, scoring| set.remove(&tx, scoring)); - if is_invalid { - self.listener.invalid(&tx); - } else { - self.listener.canceled(&tx); - } - Some(tx) - } else { - None - } - } - - /// Removes all stalled transactions from given sender. - fn remove_stalled>(&mut self, sender: &T::Sender, ready: &mut R) -> usize { - let removed_from_set = self.remove_from_set(sender, |transactions, scoring| transactions.cull(ready, scoring)); - - match removed_from_set { - Some(removed) => { - let len = removed.len(); - for tx in removed { - self.finalize_remove(tx.hash()); - self.listener.culled(&tx); - } - len - } - None => 0, - } - } - - /// Removes all stalled transactions from given sender list (or from all senders). - pub fn cull>(&mut self, senders: Option<&[T::Sender]>, mut ready: R) -> usize { - let mut removed = 0; - match senders { - Some(senders) => { - for sender in senders { - removed += self.remove_stalled(sender, &mut ready); - } - } - None => { - let senders = self.transactions.keys().cloned().collect::>(); - for sender in senders { - removed += self.remove_stalled(&sender, &mut ready); - } - } - } - - removed - } - - /// Returns a transaction if it's part of the pool or `None` otherwise. - pub fn find(&self, hash: &T::Hash) -> Option> { - self.by_hash.get(hash).map(|t| t.transaction.clone()) - } - - /// Returns worst transaction in the queue (if any). 
- pub fn worst_transaction(&self) -> Option> { - self.worst_transactions.iter().next_back().map(|x| x.transaction.transaction.clone()) - } - - /// Returns true if the pool is at it's capacity. - pub fn is_full(&self) -> bool { - self.by_hash.len() >= self.options.max_count || self.mem_usage >= self.options.max_mem_usage - } - - /// Returns senders ordered by priority of their transactions. - pub fn senders(&self) -> impl Iterator { - self.best_transactions.iter().map(|tx| tx.transaction.sender()) - } - - /// Returns an iterator of pending (ready) transactions. - pub fn pending>(&self, ready: R) -> PendingIterator<'_, T, R, S, L> { - PendingIterator { ready, best_transactions: self.best_transactions.clone(), pool: self } - } - - /// Returns pending (ready) transactions from given sender. - pub fn pending_from_sender>(&self, ready: R, sender: &T::Sender) -> PendingIterator<'_, T, R, S, L> { - let best_transactions = self - .transactions - .get(sender) - .and_then(|transactions| transactions.worst_and_best()) - .map(|(_, best)| ScoreWithRef::new(best.0, best.1)) - .map(|s| { - let mut set = BTreeSet::new(); - set.insert(s); - set - }) - .unwrap_or_default(); - - PendingIterator { ready, best_transactions, pool: self } - } - - /// Returns unprioritized list of ready transactions. - pub fn unordered_pending>(&self, ready: R) -> UnorderedIterator<'_, T, R, S> { - UnorderedIterator { ready, senders: self.transactions.iter(), transactions: None } - } - - /// Update score of transactions of a particular sender. 
- pub fn update_scores(&mut self, sender: &T::Sender, event: S::Event) { - let res = if let Some(set) = self.transactions.get_mut(sender) { - let prev = set.worst_and_best(); - set.update_scores(&self.scoring, event); - let current = set.worst_and_best(); - Some((prev, current)) - } else { - None - }; - - if let Some((prev, current)) = res { - self.update_senders_worst_and_best(prev, current); - } - } - - /// Computes the full status of the pool (including readiness). - pub fn status>(&self, mut ready: R) -> Status { - let mut status = Status::default(); - - for (_sender, transactions) in &self.transactions { - let len = transactions.len(); - for (idx, tx) in transactions.iter().enumerate() { - match ready.is_ready(tx) { - Readiness::Stale => status.stalled += 1, - Readiness::Ready => status.pending += 1, - Readiness::Future => { - status.future += len - idx; - break; - } - } - } - } - - status - } - - /// Returns light status of the pool. - pub fn light_status(&self) -> LightStatus { - LightStatus { - mem_usage: self.mem_usage, - transaction_count: self.by_hash.len(), - senders: self.transactions.len(), - } - } - - /// Returns current pool options. - pub fn options(&self) -> Options { - self.options.clone() - } - - /// Borrows listener instance. - pub fn listener(&self) -> &L { - &self.listener - } - - /// Borrows scoring instance. - pub fn scoring(&self) -> &S { - &self.scoring - } - - /// Borrows listener mutably. - pub fn listener_mut(&mut self) -> &mut L { - &mut self.listener - } -} - -/// An iterator over all pending (ready) transactions in unoredered fashion. -/// -/// NOTE: Current implementation will iterate over all transactions from particular sender -/// ordered by nonce, but that might change in the future. -/// -/// NOTE: the transactions are not removed from the queue. -/// You might remove them later by calling `cull`. 
-pub struct UnorderedIterator<'a, T, R, S> -where - T: VerifiedTransaction + 'a, - S: Scoring + 'a, -{ - ready: R, - senders: hash_map::Iter<'a, T::Sender, Transactions>, - transactions: Option>>, -} - -impl<'a, T, R, S> Iterator for UnorderedIterator<'a, T, R, S> -where - T: VerifiedTransaction, - R: Ready, - S: Scoring, -{ - type Item = Arc; - - fn next(&mut self) -> Option { - loop { - if let Some(transactions) = self.transactions.as_mut() { - if let Some(tx) = transactions.next() { - match self.ready.is_ready(&tx) { - Readiness::Ready => { - return Some(tx.transaction.clone()); - } - state => trace!("[{:?}] Ignoring {:?} transaction.", tx.hash(), state), - } - } - } - - // otherwise fallback and try next sender - let next_sender = self.senders.next()?; - self.transactions = Some(next_sender.1.iter()); - } - } -} - -/// An iterator over all pending (ready) transactions. -/// NOTE: the transactions are not removed from the queue. -/// You might remove them later by calling `cull`. -pub struct PendingIterator<'a, T, R, S, L> -where - T: VerifiedTransaction + 'a, - S: Scoring + 'a, - L: 'a, -{ - ready: R, - best_transactions: BTreeSet>, - pool: &'a Pool, -} - -impl<'a, T, R, S, L> Iterator for PendingIterator<'a, T, R, S, L> -where - T: VerifiedTransaction, - R: Ready, - S: Scoring, -{ - type Item = Arc; - - fn next(&mut self) -> Option { - while !self.best_transactions.is_empty() { - let best = { - let best = self.best_transactions.iter().next().expect("current_best is not empty; qed").clone(); - self.best_transactions.take(&best).expect("Just taken from iterator; qed") - }; - - let tx_state = self.ready.is_ready(&best.transaction); - // Add the next best sender's transaction when applicable - match tx_state { - Readiness::Ready | Readiness::Stale => { - // retrieve next one from the same sender. 
- let next = self - .pool - .transactions - .get(best.transaction.sender()) - .and_then(|s| s.find_next(&best.transaction, &self.pool.scoring)); - if let Some((score, tx)) = next { - self.best_transactions.insert(ScoreWithRef::new(score, tx)); - } - } - _ => (), - } - - if tx_state == Readiness::Ready { - return Some(best.transaction.transaction); - } - - trace!("[{:?}] Ignoring {:?} transaction.", best.transaction.hash(), tx_state); - } - - None - } -} diff --git a/transaction-pool/src/ready.rs b/transaction-pool/src/ready.rs deleted file mode 100644 index 009eae273..000000000 --- a/transaction-pool/src/ready.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// Transaction readiness. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Readiness { - /// The transaction is stale (and should/will be removed from the pool). - Stale, - /// The transaction is ready to be included in pending set. - Ready, - /// The transaction is not yet ready. - Future, -} - -/// A readiness indicator. -pub trait Ready { - /// Returns true if transaction is ready to be included in pending block, - /// given all previous transactions that were ready are already included. - /// - /// NOTE: readiness of transactions will be checked according to `Score` ordering, - /// the implementation should maintain a state of already checked transactions. 
- fn is_ready(&mut self, tx: &T) -> Readiness; -} - -impl Ready for F -where - F: FnMut(&T) -> Readiness, -{ - fn is_ready(&mut self, tx: &T) -> Readiness { - (*self)(tx) - } -} - -impl Ready for (A, B) -where - A: Ready, - B: Ready, -{ - fn is_ready(&mut self, tx: &T) -> Readiness { - match self.0.is_ready(tx) { - Readiness::Ready => self.1.is_ready(tx), - r => r, - } - } -} diff --git a/transaction-pool/src/replace.rs b/transaction-pool/src/replace.rs deleted file mode 100644 index cbae6319b..000000000 --- a/transaction-pool/src/replace.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! When queue limits are reached, decide whether to replace an existing transaction from the pool - -use crate::{pool::Transaction, scoring::Choice}; - -/// Encapsulates a transaction to be compared, along with pooled transactions from the same sender -pub struct ReplaceTransaction<'a, T> { - /// The transaction to be compared for replacement - pub transaction: &'a Transaction, - /// Other transactions currently in the pool for the same sender - pub pooled_by_sender: Option<&'a [Transaction]>, -} - -impl<'a, T> ReplaceTransaction<'a, T> { - /// Creates a new `ReplaceTransaction` - pub fn new(transaction: &'a Transaction, pooled_by_sender: Option<&'a [Transaction]>) -> Self { - ReplaceTransaction { transaction, pooled_by_sender } - } -} - -impl<'a, T> ::std::ops::Deref for ReplaceTransaction<'a, T> { - type Target = Transaction; - fn deref(&self) -> &Self::Target { - &self.transaction - } -} - -/// Chooses whether a new transaction should replace an existing transaction if the pool is full. -pub trait ShouldReplace { - /// Decides if `new` should push out `old` transaction from the pool. 
- /// - /// NOTE returning `InsertNew` here can lead to some transactions being accepted above pool limits. - fn should_replace(&self, old: &ReplaceTransaction<'_, T>, new: &ReplaceTransaction<'_, T>) -> Choice; -} diff --git a/transaction-pool/src/scoring.rs b/transaction-pool/src/scoring.rs deleted file mode 100644 index b7f75e7fc..000000000 --- a/transaction-pool/src/scoring.rs +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A transactions ordering abstraction. - -use crate::pool::Transaction; -use std::{cmp, fmt}; - -/// Represents a decision what to do with -/// a new transaction that tries to enter the pool. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Choice { - /// New transaction should be rejected - /// (i.e. the old transaction that occupies the same spot - /// is better). - RejectNew, - /// The old transaction should be dropped - /// in favour of the new one. - ReplaceOld, - /// The new transaction should be inserted - /// and both (old and new) should stay in the pool. - InsertNew, -} - -/// Describes a reason why the `Score` of transactions -/// should be updated. -/// The `Scoring` implementations can use this information -/// to update the `Score` table more efficiently. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Change { - /// New transaction has been inserted at given index. - /// The Score at that index is initialized with default value - /// and needs to be filled in. - InsertedAt(usize), - /// The transaction has been removed at given index and other transactions - /// shifted to it's place. - /// The scores were removed and shifted as well. - /// For simple scoring algorithms no action is required here. - RemovedAt(usize), - /// The transaction at given index has replaced a previous transaction. 
- /// The score at that index needs to be update (it contains value from previous transaction). - ReplacedAt(usize), - /// Given number of stalled transactions has been culled from the beginning. - /// The scores has been removed from the beginning as well. - /// For simple scoring algorithms no action is required here. - Culled(usize), - /// Custom event to update the score triggered outside of the pool. - /// Handling this event is up to scoring implementation. - Event(T), -} - -/// A transaction ordering. -/// -/// The implementation should decide on order of transactions in the pool. -/// Each transaction should also get assigned a `Score` which is used to later -/// prioritize transactions in the pending set. -/// -/// Implementation notes: -/// - Returned `Score`s should match ordering of `compare` method. -/// - `compare` will be called only within a context of transactions from the same sender. -/// - `choose` may be called even if `compare` returns `Ordering::Equal` -/// - `Score`s and `compare` should align with `Ready` implementation. -/// -/// Example: Natural ordering of Ethereum transactions. -/// - `compare`: compares transaction `nonce` () -/// - `choose`: compares transactions `gasPrice` (decides if old transaction should be replaced) -/// - `update_scores`: score defined as `gasPrice` if `n==0` and `max(scores[n-1], gasPrice)` if `n>0` -/// -pub trait Scoring: fmt::Debug { - /// A score of a transaction. - type Score: cmp::Ord + Clone + Default + fmt::Debug + Send + fmt::LowerHex; - /// Custom scoring update event type. - type Event: fmt::Debug; - - /// Decides on ordering of `T`s from a particular sender. - fn compare(&self, old: &T, other: &T) -> cmp::Ordering; - - /// Decides how to deal with two transactions from a sender that seem to occupy the same slot in the queue. - fn choose(&self, old: &T, new: &T) -> Choice; - - /// Updates the transaction scores given a list of transactions and a change to previous scoring. 
- /// NOTE: you can safely assume that both slices have the same length. - /// (i.e. score at index `i` represents transaction at the same index) - fn update_scores(&self, txs: &[Transaction], scores: &mut [Self::Score], change: Change); - - /// Decides if the transaction should ignore per-sender limit in the pool. - /// - /// If you return `true` for given transaction it's going to be accepted even though - /// the per-sender limit is exceeded. - fn should_ignore_sender_limit(&self, _new: &T) -> bool { - false - } -} - -/// A score with a reference to the transaction. -#[derive(Debug)] -pub struct ScoreWithRef { - /// Score - pub score: S, - /// Shared transaction - pub transaction: Transaction, -} - -impl ScoreWithRef { - /// Creates a new `ScoreWithRef` - pub fn new(score: S, transaction: Transaction) -> Self { - ScoreWithRef { score, transaction } - } -} - -impl Clone for ScoreWithRef { - fn clone(&self) -> Self { - ScoreWithRef { score: self.score.clone(), transaction: self.transaction.clone() } - } -} - -impl Ord for ScoreWithRef { - fn cmp(&self, other: &Self) -> cmp::Ordering { - other.score.cmp(&self.score).then(self.transaction.insertion_id.cmp(&other.transaction.insertion_id)) - } -} - -impl PartialOrd for ScoreWithRef { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for ScoreWithRef { - fn eq(&self, other: &Self) -> bool { - self.score == other.score && self.transaction.insertion_id == other.transaction.insertion_id - } -} - -impl Eq for ScoreWithRef {} - -#[cfg(test)] -mod tests { - use super::*; - - fn score(score: u64, insertion_id: u64) -> ScoreWithRef<(), u64> { - ScoreWithRef { score, transaction: Transaction { insertion_id, transaction: Default::default() } } - } - - #[test] - fn scoring_comparison() { - // the higher the score the better - assert_eq!(score(10, 0).cmp(&score(0, 0)), cmp::Ordering::Less); - assert_eq!(score(0, 0).cmp(&score(10, 0)), cmp::Ordering::Greater); - - // equal is equal 
- assert_eq!(score(0, 0).cmp(&score(0, 0)), cmp::Ordering::Equal); - - // lower insertion id is better - assert_eq!(score(0, 0).cmp(&score(0, 10)), cmp::Ordering::Less); - assert_eq!(score(0, 10).cmp(&score(0, 0)), cmp::Ordering::Greater); - } -} diff --git a/transaction-pool/src/status.rs b/transaction-pool/src/status.rs deleted file mode 100644 index 615e40cb7..000000000 --- a/transaction-pool/src/status.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// Light pool status. -/// This status is cheap to compute and can be called frequently. -#[derive(Default, Debug, Clone, PartialEq, Eq)] -pub struct LightStatus { - /// Memory usage in bytes. - pub mem_usage: usize, - /// Total number of transactions in the pool. - pub transaction_count: usize, - /// Number of unique senders in the pool. - pub senders: usize, -} - -/// A full queue status. -/// To compute this status it is required to provide `Ready`. -/// NOTE: To compute the status we need to visit each transaction in the pool. -#[derive(Default, Debug, Clone, PartialEq, Eq)] -pub struct Status { - /// Number of stalled transactions. - pub stalled: usize, - /// Number of pending (ready) transactions. - pub pending: usize, - /// Number of future (not ready) transactions. - pub future: usize, -} diff --git a/transaction-pool/src/tests/helpers.rs b/transaction-pool/src/tests/helpers.rs deleted file mode 100644 index 8f6e5fb99..000000000 --- a/transaction-pool/src/tests/helpers.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::cmp; -use std::collections::HashMap; - -use super::Transaction; -use crate::{pool, scoring, Readiness, Ready, ReplaceTransaction, Scoring, ShouldReplace}; -use ethereum_types::{H160 as Sender, U256}; - -#[derive(Debug, Default)] -pub struct DummyScoring { - always_insert: bool, -} - -impl DummyScoring { - pub fn always_insert() -> Self { - DummyScoring { always_insert: true } - } -} - -impl Scoring for DummyScoring { - type Score = U256; - type Event = (); - - fn compare(&self, old: &Transaction, new: &Transaction) -> cmp::Ordering { - old.nonce.cmp(&new.nonce) - } - - fn choose(&self, old: &Transaction, new: &Transaction) -> scoring::Choice { - if old.nonce == new.nonce { - if new.gas_price > old.gas_price { - scoring::Choice::ReplaceOld - } else { - scoring::Choice::RejectNew - } - } else { - scoring::Choice::InsertNew - } - } - - fn update_scores( - &self, - txs: &[pool::Transaction], - scores: &mut [Self::Score], - change: scoring::Change, - ) { - if let scoring::Change::Event(_) = change { - // In case of event reset all scores to 0 - for i in 0..txs.len() { - scores[i] = 0.into(); - } - } else { - // Set to a gas price otherwise - for i in 0..txs.len() { - scores[i] = txs[i].gas_price; - } - } - } - - fn should_ignore_sender_limit(&self, _new: &Transaction) -> bool { - self.always_insert - } -} - -impl ShouldReplace for DummyScoring { - fn should_replace( - &self, - old: &ReplaceTransaction<'_, Transaction>, - new: &ReplaceTransaction<'_, Transaction>, - ) -> scoring::Choice { - if self.always_insert { - scoring::Choice::InsertNew - } else if new.gas_price > old.gas_price { - scoring::Choice::ReplaceOld - } else { - scoring::Choice::RejectNew - } - } -} - -#[derive(Default)] -pub struct NonceReady(HashMap, U256); - -impl NonceReady { - pub fn new>(min: T) -> Self { - let mut n = NonceReady::default(); - n.1 = min.into(); - n - } -} - -impl Ready for NonceReady { - fn is_ready(&mut self, tx: &Transaction) -> Readiness { - let min = self.1; - let 
nonce = self.0.entry(tx.sender).or_insert_with(|| min); - match tx.nonce.cmp(nonce) { - cmp::Ordering::Greater => Readiness::Future, - cmp::Ordering::Equal => { - *nonce += 1.into(); - Readiness::Ready - } - cmp::Ordering::Less => Readiness::Stale, - } - } -} diff --git a/transaction-pool/src/tests/mod.rs b/transaction-pool/src/tests/mod.rs deleted file mode 100644 index 2d80b4a3d..000000000 --- a/transaction-pool/src/tests/mod.rs +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -mod helpers; -mod tx_builder; - -use self::helpers::{DummyScoring, NonceReady}; -use self::tx_builder::TransactionBuilder; - -use std::sync::Arc; - -use super::*; -use ethereum_types::{Address, H256, U256}; - -#[derive(Debug, PartialEq)] -pub struct Transaction { - pub hash: H256, - pub nonce: U256, - pub gas_price: U256, - pub gas: U256, - pub sender: Address, - pub mem_usage: usize, -} - -impl VerifiedTransaction for Transaction { - type Hash = H256; - type Sender = Address; - - fn hash(&self) -> &H256 { - &self.hash - } - fn mem_usage(&self) -> usize { - self.mem_usage - } - fn sender(&self) -> &Address { - &self.sender - } -} - -pub type SharedTransaction = Arc; - -type TestPool = Pool; - -impl TestPool { - pub fn with_limit(max_count: usize) -> Self { - Self::with_options(Options { max_count, ..Default::default() }) - } -} - -fn import, L: Listener>( - txq: &mut Pool, - tx: Transaction, -) -> Result, Error<::Hash>> { - txq.import(tx, &mut DummyScoring::default()) -} - -#[test] -fn should_clear_queue() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - assert_eq!(txq.light_status(), LightStatus { mem_usage: 0, transaction_count: 0, senders: 0 }); - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(1).mem_usage(1).new(); - - 
// add - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status(), LightStatus { mem_usage: 1, transaction_count: 2, senders: 1 }); - - // when - txq.clear(); - - // then - assert_eq!(txq.light_status(), LightStatus { mem_usage: 0, transaction_count: 0, senders: 0 }); -} - -#[test] -fn should_not_allow_same_transaction_twice() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(0).new(); - - // when - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap_err(); - - // then - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_replace_transaction() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - let tx1 = b.tx().nonce(0).gas_price(1).new(); - let tx2 = b.tx().nonce(0).gas_price(2).new(); - - // when - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - - // then - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_reject_if_above_count() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { max_count: 1, ..Default::default() }); - - // Reject second - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(1).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); - - txq.clear(); - - // Replace first - let tx1 = b.tx().nonce(0).new(); - let tx2 = b.tx().nonce(0).sender(1).gas_price(2).new(); - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_reject_if_above_mem_usage() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { max_mem_usage: 1, ..Default::default() }); - 
- // Reject second - let tx1 = b.tx().nonce(1).mem_usage(1).new(); - let tx2 = b.tx().nonce(2).mem_usage(2).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); - - txq.clear(); - - // Replace first - let tx1 = b.tx().nonce(1).mem_usage(1).new(); - let tx2 = b.tx().nonce(1).sender(1).gas_price(2).mem_usage(1).new(); - import(&mut txq, tx1).unwrap(); - import(&mut txq, tx2).unwrap(); - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_reject_if_above_sender_count() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_options(Options { max_per_sender: 1, ..Default::default() }); - - // Reject second - let tx1 = b.tx().nonce(1).new(); - let tx2 = b.tx().nonce(2).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); - - txq.clear(); - - // Replace first - let tx1 = b.tx().nonce(1).new(); - let tx2 = b.tx().nonce(2).gas_price(2).new(); - let hash = tx2.hash.clone(); - import(&mut txq, tx1).unwrap(); - // This results in error because we also compare nonces - assert_eq!(import(&mut txq, tx2).unwrap_err(), error::Error::TooCheapToEnter(hash, "0x0".into())); - assert_eq!(txq.light_status().transaction_count, 1); -} - -#[test] -fn should_construct_pending() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - - let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); - - let tx5 = import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - let tx6 = import(&mut txq, 
b.tx().sender(1).nonce(1).new()).unwrap(); - let tx7 = import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - let tx8 = import(&mut txq, b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap(); - - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - // this transaction doesn't get to the block despite high gas price - // because of block gas limit and simplistic ordering algorithm. - import(&mut txq, b.tx().nonce(3).gas_price(4).new()).unwrap(); - //gap - import(&mut txq, b.tx().nonce(5).new()).unwrap(); - - // gap - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { stalled: 0, pending: 9, future: 2 }); - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 3, pending: 6, future: 2 }); - - // when - let mut current_gas = U256::zero(); - let limit = (21_000 * 8).into(); - let mut pending = txq.pending(NonceReady::default()).take_while(|tx| { - let should_take = tx.gas + current_gas <= limit; - if should_take { - current_gas = current_gas + tx.gas - } - should_take - }); - - assert_eq!(pending.next(), Some(tx0)); - assert_eq!(pending.next(), Some(tx1)); - assert_eq!(pending.next(), Some(tx9)); - assert_eq!(pending.next(), Some(tx5)); - assert_eq!(pending.next(), Some(tx6)); - assert_eq!(pending.next(), Some(tx7)); - assert_eq!(pending.next(), Some(tx8)); - assert_eq!(pending.next(), Some(tx2)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_skip_staled_pending_transactions() { - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let _tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).gas_price(5).new()).unwrap(); - let _tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - - // tx0 and tx1 are Stale, tx2 is Ready - let mut pending = txq.pending(NonceReady::new(2)); - - // tx0 and tx1 should be 
skipped, tx2 should be the next Ready - assert_eq!(pending.next(), Some(tx2)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_return_unordered_iterator() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - let tx3 = import(&mut txq, b.tx().nonce(3).gas_price(4).new()).unwrap(); - //gap - import(&mut txq, b.tx().nonce(5).new()).unwrap(); - - let tx5 = import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - let tx6 = import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - let tx7 = import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - let tx8 = import(&mut txq, b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap(); - // gap - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); - assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { stalled: 0, pending: 9, future: 2 }); - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 3, pending: 6, future: 2 }); - - // when - let all: Vec<_> = txq.unordered_pending(NonceReady::default()).collect(); - - let chain1 = vec![tx0, tx1, tx2, tx3]; - let chain2 = vec![tx5, tx6, tx7, tx8]; - let chain3 = vec![tx9]; - - assert_eq!(all.len(), chain1.len() + chain2.len() + chain3.len()); - - let mut options = vec![ - vec![chain1.clone(), chain2.clone(), chain3.clone()], - vec![chain2.clone(), chain1.clone(), chain3.clone()], - vec![chain2.clone(), chain3.clone(), chain1.clone()], - vec![chain3.clone(), chain2.clone(), chain1.clone()], - vec![chain3.clone(), chain1.clone(), chain2.clone()], - vec![chain1.clone(), chain3.clone(), chain2.clone()], - ] - .into_iter() - .map(|mut v| { - let mut first = 
v.pop().unwrap(); - for mut x in v { - first.append(&mut x); - } - first - }); - - assert!(options.any(|opt| all == opt)); -} - -#[test] -fn should_update_scoring_correctly() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx9 = import(&mut txq, b.tx().sender(2).nonce(0).new()).unwrap(); - - let tx5 = import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - let tx6 = import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - let tx7 = import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - let tx8 = import(&mut txq, b.tx().sender(1).nonce(3).gas_price(4).new()).unwrap(); - - let tx0 = import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - let tx1 = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - // this transaction doesn't get to the block despite high gas price - // because of block gas limit and simplistic ordering algorithm. - import(&mut txq, b.tx().nonce(3).gas_price(4).new()).unwrap(); - //gap - import(&mut txq, b.tx().nonce(5).new()).unwrap(); - - // gap - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - assert_eq!(txq.light_status().transaction_count, 11); - assert_eq!(txq.status(NonceReady::default()), Status { stalled: 0, pending: 9, future: 2 }); - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 3, pending: 6, future: 2 }); - - txq.update_scores(&Address::zero(), ()); - - // when - let mut current_gas = U256::zero(); - let limit = (21_000 * 8).into(); - let mut pending = txq.pending(NonceReady::default()).take_while(|tx| { - let should_take = tx.gas + current_gas <= limit; - if should_take { - current_gas = current_gas + tx.gas - } - should_take - }); - - assert_eq!(pending.next(), Some(tx9)); - assert_eq!(pending.next(), Some(tx5)); - assert_eq!(pending.next(), Some(tx6)); - assert_eq!(pending.next(), Some(tx7)); - assert_eq!(pending.next(), Some(tx8)); - // penalized 
transactions - assert_eq!(pending.next(), Some(tx0)); - assert_eq!(pending.next(), Some(tx1)); - assert_eq!(pending.next(), Some(tx2)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_remove_transaction() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - let tx1 = import(&mut txq, b.tx().nonce(0).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(2).new()).unwrap(); - assert_eq!(txq.light_status().transaction_count, 3); - - // when - assert!(txq.remove(&tx2.hash(), false).is_some()); - - // then - assert_eq!(txq.light_status().transaction_count, 2); - let mut pending = txq.pending(NonceReady::default()); - assert_eq!(pending.next(), Some(tx1)); - assert_eq!(pending.next(), None); -} - -#[test] -fn should_cull_stalled_transactions() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(3).new()).unwrap(); - - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(5).new()).unwrap(); - - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 2, pending: 2, future: 2 }); - - // when - assert_eq!(txq.cull(None, NonceReady::new(1)), 2); - - // then - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 0, pending: 2, future: 2 }); - assert_eq!(txq.light_status(), LightStatus { transaction_count: 4, senders: 2, mem_usage: 0 }); -} - -#[test] -fn should_cull_stalled_transactions_from_a_sender() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - - import(&mut txq, 
b.tx().sender(1).nonce(0).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(2).new()).unwrap(); - - assert_eq!(txq.status(NonceReady::new(2)), Status { stalled: 4, pending: 1, future: 0 }); - - // when - let sender = Address::zero(); - assert_eq!(txq.cull(Some(&[sender]), NonceReady::new(2)), 2); - - // then - assert_eq!(txq.status(NonceReady::new(2)), Status { stalled: 2, pending: 1, future: 0 }); - assert_eq!(txq.light_status(), LightStatus { transaction_count: 3, senders: 1, mem_usage: 0 }); -} - -#[test] -fn should_re_insert_after_cull() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(1).new()).unwrap(); - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 2, pending: 2, future: 0 }); - - // when - assert_eq!(txq.cull(None, NonceReady::new(1)), 2); - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 0, pending: 2, future: 0 }); - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(0).new()).unwrap(); - - assert_eq!(txq.status(NonceReady::new(1)), Status { stalled: 2, pending: 2, future: 0 }); -} - -#[test] -fn should_return_worst_transaction() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::default(); - assert!(txq.worst_transaction().is_none()); - - // when - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - import(&mut txq, b.tx().sender(1).nonce(0).gas_price(4).new()).unwrap(); - - // then - assert_eq!(txq.worst_transaction().unwrap().gas_price, 4.into()); -} - -#[test] -fn should_return_is_full() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_limit(2); - 
assert!(!txq.is_full()); - - // when - import(&mut txq, b.tx().nonce(0).gas_price(110).new()).unwrap(); - assert!(!txq.is_full()); - - import(&mut txq, b.tx().sender(1).nonce(0).gas_price(100).new()).unwrap(); - - // then - assert!(txq.is_full()); -} - -#[test] -fn should_import_even_if_limit_is_reached_and_should_replace_returns_insert_new() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::always_insert(), Options { max_count: 1, ..Default::default() }); - txq.import(b.tx().nonce(0).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); - - // when - txq.import(b.tx().nonce(1).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - - // then - assert_eq!(txq.light_status(), LightStatus { transaction_count: 2, senders: 1, mem_usage: 0 }); -} - -#[test] -fn should_not_import_even_if_limit_is_reached_and_should_replace_returns_false() { - use std::str::FromStr; - - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring(DummyScoring::default(), Options { max_count: 1, ..Default::default() }); - import(&mut txq, b.tx().nonce(0).gas_price(5).new()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); - - // when - let err = import(&mut txq, b.tx().nonce(1).gas_price(5).new()).unwrap_err(); - - // then - assert_eq!( - err, - error::Error::TooCheapToEnter( - H256::from_str("00000000000000000000000000000000000000000000000000000000000001f5").unwrap(), - "0x5".into() - ) - ); - assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); -} - -#[test] -fn should_import_even_if_sender_limit_is_reached() { - // given - let b = TransactionBuilder::default(); - let mut txq = TestPool::with_scoring( - DummyScoring::always_insert(), - Options { max_count: 1, max_per_sender: 1, 
..Default::default() }, - ); - txq.import(b.tx().nonce(0).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - assert_eq!(txq.light_status(), LightStatus { transaction_count: 1, senders: 1, mem_usage: 0 }); - - // when - txq.import(b.tx().nonce(1).gas_price(5).new(), &mut DummyScoring::always_insert()).unwrap(); - - // then - assert_eq!(txq.light_status(), LightStatus { transaction_count: 2, senders: 1, mem_usage: 0 }); -} - -mod listener { - use std::cell::RefCell; - use std::fmt; - use std::rc::Rc; - - use super::*; - - #[derive(Default)] - struct MyListener(pub Rc>>); - - impl Listener for MyListener { - fn added(&mut self, _tx: &SharedTransaction, old: Option<&SharedTransaction>) { - self.0.borrow_mut().push(if old.is_some() { "replaced" } else { "added" }); - } - - fn rejected(&mut self, _tx: &SharedTransaction, _reason: &error::Error) { - self.0.borrow_mut().push("rejected".into()); - } - - fn dropped(&mut self, _tx: &SharedTransaction, _new: Option<&Transaction>) { - self.0.borrow_mut().push("dropped".into()); - } - - fn invalid(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("invalid".into()); - } - - fn canceled(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("canceled".into()); - } - - fn culled(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("culled".into()); - } - } - - #[test] - fn insert_transaction() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new( - listener, - DummyScoring::default(), - Options { max_per_sender: 1, max_count: 2, ..Default::default() }, - ); - assert!(results.borrow().is_empty()); - - // Regular import - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - assert_eq!(*results.borrow(), &["added"]); - // Already present (no notification) - import(&mut txq, b.tx().nonce(1).new()).unwrap_err(); - assert_eq!(*results.borrow(), &["added"]); - // Push out the first one - 
import(&mut txq, b.tx().nonce(1).gas_price(1).new()).unwrap(); - assert_eq!(*results.borrow(), &["added", "replaced"]); - // Reject - import(&mut txq, b.tx().nonce(1).new()).unwrap_err(); - assert_eq!(*results.borrow(), &["added", "replaced", "rejected"]); - results.borrow_mut().clear(); - // Different sender (accept) - import(&mut txq, b.tx().sender(1).nonce(1).gas_price(2).new()).unwrap(); - assert_eq!(*results.borrow(), &["added"]); - // Third sender push out low gas price - import(&mut txq, b.tx().sender(2).nonce(1).gas_price(4).new()).unwrap(); - assert_eq!(*results.borrow(), &["added", "dropped", "added"]); - // Reject (too cheap) - import(&mut txq, b.tx().sender(2).nonce(1).gas_price(2).new()).unwrap_err(); - assert_eq!(*results.borrow(), &["added", "dropped", "added", "rejected"]); - - assert_eq!(txq.light_status().transaction_count, 2); - } - - #[test] - fn remove_transaction() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options::default()); - - // insert - let tx1 = import(&mut txq, b.tx().nonce(1).new()).unwrap(); - let tx2 = import(&mut txq, b.tx().nonce(2).new()).unwrap(); - - // then - txq.remove(&tx1.hash(), false); - assert_eq!(*results.borrow(), &["added", "added", "canceled"]); - txq.remove(&tx2.hash(), true); - assert_eq!(*results.borrow(), &["added", "added", "canceled", "invalid"]); - assert_eq!(txq.light_status().transaction_count, 0); - } - - #[test] - fn clear_queue() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options::default()); - - // insert - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(2).new()).unwrap(); - - // when - txq.clear(); - - // then - assert_eq!(*results.borrow(), &["added", "added", "dropped", "dropped"]); - } - - 
#[test] - fn cull_stalled() { - let b = TransactionBuilder::default(); - let listener = MyListener::default(); - let results = listener.0.clone(); - let mut txq = Pool::new(listener, DummyScoring::default(), Options::default()); - - // insert - import(&mut txq, b.tx().nonce(1).new()).unwrap(); - import(&mut txq, b.tx().nonce(2).new()).unwrap(); - - // when - txq.cull(None, NonceReady::new(3)); - - // then - assert_eq!(*results.borrow(), &["added", "added", "culled", "culled"]); - } -} diff --git a/transaction-pool/src/tests/tx_builder.rs b/transaction-pool/src/tests/tx_builder.rs deleted file mode 100644 index d543e830e..000000000 --- a/transaction-pool/src/tests/tx_builder.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::{Address, Transaction, H256, U256}; -use ethereum_types::BigEndianHash; - -#[derive(Debug, Default, Clone)] -pub struct TransactionBuilder { - nonce: U256, - gas_price: U256, - gas: U256, - sender: Address, - mem_usage: usize, -} - -impl TransactionBuilder { - pub fn tx(&self) -> Self { - self.clone() - } - - pub fn nonce(mut self, nonce: usize) -> Self { - self.nonce = U256::from(nonce); - self - } - - pub fn gas_price(mut self, gas_price: usize) -> Self { - self.gas_price = U256::from(gas_price); - self - } - - pub fn sender(mut self, sender: u64) -> Self { - self.sender = Address::from_low_u64_be(sender); - self - } - - pub fn mem_usage(mut self, mem_usage: usize) -> Self { - self.mem_usage = mem_usage; - self - } - - pub fn new(self) -> Transaction { - let hash: U256 = self.nonce - ^ (U256::from(100) * self.gas_price) - ^ (U256::from(100_000) * U256::from(self.sender.to_low_u64_be())); - Transaction { - hash: H256::from_uint(&hash), - nonce: self.nonce, - gas_price: self.gas_price, - gas: 21_000.into(), - sender: 
self.sender, - mem_usage: self.mem_usage, - } - } -} diff --git a/transaction-pool/src/transactions.rs b/transaction-pool/src/transactions.rs deleted file mode 100644 index 4d6d126af..000000000 --- a/transaction-pool/src/transactions.rs +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::{fmt, mem}; - -use log::warn; -use smallvec::SmallVec; - -use crate::{ - pool::Transaction, - ready::{Readiness, Ready}, - scoring::{self, Scoring}, -}; - -#[derive(Debug)] -pub enum AddResult { - Ok(T), - TooCheapToEnter(T, S), - TooCheap { old: T, new: T }, - Replaced { old: T, new: T }, - PushedOut { old: T, new: T }, -} - -/// Represents all transactions from a particular sender ordered by nonce. -const PER_SENDER: usize = 8; -#[derive(Debug)] -pub struct Transactions> { - // TODO [ToDr] Consider using something that doesn't require shifting all records. 
- transactions: SmallVec<[Transaction; PER_SENDER]>, - scores: SmallVec<[S::Score; PER_SENDER]>, -} - -impl> Default for Transactions { - fn default() -> Self { - Transactions { transactions: Default::default(), scores: Default::default() } - } -} - -impl> Transactions { - pub fn is_empty(&self) -> bool { - self.transactions.is_empty() - } - - pub fn len(&self) -> usize { - self.transactions.len() - } - - pub fn iter(&self) -> ::std::slice::Iter<'_, Transaction> { - self.transactions.iter() - } - - pub fn worst_and_best(&self) -> Option<((S::Score, Transaction), (S::Score, Transaction))> { - let len = self.scores.len(); - self.scores.get(0).cloned().map(|best| { - let worst = self.scores[len - 1].clone(); - let best_tx = self.transactions[0].clone(); - let worst_tx = self.transactions[len - 1].clone(); - - ((worst, worst_tx), (best, best_tx)) - }) - } - - pub fn find_next(&self, tx: &T, scoring: &S) -> Option<(S::Score, Transaction)> { - self.transactions.binary_search_by(|old| scoring.compare(old, &tx)).ok().and_then(|index| { - let index = index + 1; - if index < self.scores.len() { - Some((self.scores[index].clone(), self.transactions[index].clone())) - } else { - None - } - }) - } - - fn push_cheapest_transaction( - &mut self, - tx: Transaction, - scoring: &S, - max_count: usize, - ) -> AddResult, S::Score> { - let index = self.transactions.len(); - if index == max_count && !scoring.should_ignore_sender_limit(&tx) { - let min_score = self.scores[index - 1].clone(); - AddResult::TooCheapToEnter(tx, min_score) - } else { - self.transactions.push(tx.clone()); - self.scores.push(Default::default()); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::InsertedAt(index)); - - AddResult::Ok(tx) - } - } - - pub fn update_scores(&mut self, scoring: &S, event: S::Event) { - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::Event(event)); - } - - pub fn add(&mut self, new: Transaction, scoring: &S, max_count: usize) -> 
AddResult, S::Score> { - let index = match self.transactions.binary_search_by(|old| scoring.compare(old, &new)) { - Ok(index) => index, - Err(index) => index, - }; - - // Insert at the end. - if index == self.transactions.len() { - return self.push_cheapest_transaction(new, scoring, max_count); - } - - // Decide if the transaction should replace some other. - match scoring.choose(&self.transactions[index], &new) { - // New transaction should be rejected - scoring::Choice::RejectNew => AddResult::TooCheap { old: self.transactions[index].clone(), new }, - // New transaction should be kept along with old ones. - scoring::Choice::InsertNew => { - self.transactions.insert(index, new.clone()); - self.scores.insert(index, Default::default()); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::InsertedAt(index)); - - if self.transactions.len() > max_count { - let old = self.transactions.pop().expect("len is non-zero"); - self.scores.pop(); - scoring.update_scores( - &self.transactions, - &mut self.scores, - scoring::Change::RemovedAt(self.transactions.len()), - ); - - AddResult::PushedOut { old, new } - } else { - AddResult::Ok(new) - } - } - // New transaction is replacing some other transaction already in the queue. 
- scoring::Choice::ReplaceOld => { - let old = mem::replace(&mut self.transactions[index], new.clone()); - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::ReplacedAt(index)); - - AddResult::Replaced { old, new } - } - } - } - - pub fn remove(&mut self, tx: &T, scoring: &S) -> bool { - let index = match self.transactions.binary_search_by(|old| scoring.compare(old, tx)) { - Ok(index) => index, - Err(_) => { - warn!("Attempting to remove non-existent transaction {:?}", tx); - return false; - } - }; - - self.transactions.remove(index); - self.scores.remove(index); - // Update scoring - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::RemovedAt(index)); - return true; - } - - pub fn cull>(&mut self, ready: &mut R, scoring: &S) -> SmallVec<[Transaction; PER_SENDER]> { - let mut result = SmallVec::new(); - if self.is_empty() { - return result; - } - - let mut first_non_stalled = 0; - for tx in &self.transactions { - match ready.is_ready(tx) { - Readiness::Stale => { - first_non_stalled += 1; - } - Readiness::Ready | Readiness::Future => break, - } - } - - if first_non_stalled == 0 { - return result; - } - - // reverse the vectors to easily remove first elements. - self.transactions.reverse(); - self.scores.reverse(); - - for _ in 0..first_non_stalled { - self.scores.pop(); - result.push( - self.transactions.pop().expect("first_non_stalled is never greater than transactions.len(); qed"), - ); - } - - self.transactions.reverse(); - self.scores.reverse(); - - // update scoring - scoring.update_scores(&self.transactions, &mut self.scores, scoring::Change::Culled(result.len())); - - // reverse the result to maintain correct order. 
- result.reverse(); - result - } -} diff --git a/transaction-pool/src/verifier.rs b/transaction-pool/src/verifier.rs deleted file mode 100644 index d28e5a55e..000000000 --- a/transaction-pool/src/verifier.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use crate::VerifiedTransaction; - -/// Transaction verification. -/// -/// Verifier is responsible to decide if the transaction should even be considered for pool inclusion. -pub trait Verifier { - /// Verification error. - type Error; - - /// Verified transaction. - type VerifiedTransaction: VerifiedTransaction; - - /// Verifies a `UnverifiedTransaction` and produces `VerifiedTransaction` instance. - fn verify_transaction(&self, tx: U) -> Result; -} diff --git a/triehash/CHANGELOG.md b/triehash/CHANGELOG.md deleted file mode 100644 index 2f7d72d64..000000000 --- a/triehash/CHANGELOG.md +++ /dev/null @@ -1,18 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [0.8.4] - 2020-01-08 -- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) -## [0.8.3] - 2020-03-16 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) -## [0.8.2] - 2019-12-15 -- Added no-std support. [#280](https://github.com/paritytech/parity-common/pull/280) -## [0.8.1] - 2019-10-24 -- Migrated to 2018 edition. [#214](https://github.com/paritytech/parity-common/pull/214) -### Dependencies -- Updated dependencies. 
[#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/triehash/Cargo.toml b/triehash/Cargo.toml deleted file mode 100644 index 440c07ab8..000000000 --- a/triehash/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "triehash" -version = "0.8.4" -authors = ["Parity Technologies "] -description = "In-memory patricia trie operations" -repository = "https://github.com/paritytech/parity-common" -license = "MIT OR Apache-2.0" -edition = "2018" - -[dependencies] -hash-db = { version = "0.15.2", default-features = false } -rlp = { version = "0.5", path = "../rlp", default-features = false } - -[dev-dependencies] -criterion = "0.3.0" -keccak-hasher = "0.15.2" -ethereum-types = { version = "0.12.0", path = "../ethereum-types" } -tiny-keccak = { version = "2.0", features = ["keccak"] } -trie-standardmap = "0.15.2" -hex-literal = "0.3.1" - -[features] -default = ["std"] -std = [ - "hash-db/std", - "rlp/std", -] - -[[bench]] -name = "triehash" -path = "benches/triehash.rs" -harness = false diff --git a/triehash/README.md b/triehash/README.md deleted file mode 100644 index 99c5ce459..000000000 --- a/triehash/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This crate provides utility functions to validate and initialize tries using flexible input. -It is used extensively in `parity-ethereum` to validate blocks (mostly transactions and receipt roots). \ No newline at end of file diff --git a/triehash/benches/triehash.rs b/triehash/benches/triehash.rs deleted file mode 100644 index 14ce0dd6b..000000000 --- a/triehash/benches/triehash.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use criterion::{criterion_group, criterion_main, Criterion}; -use ethereum_types::H256; -use keccak_hasher::KeccakHasher; -use tiny_keccak::{Hasher, Keccak}; -use trie_standardmap::{Alphabet, StandardMap, ValueMode}; -use triehash::trie_root; - -fn keccak256(input: &[u8]) -> [u8; 32] { - let mut keccak256 = Keccak::v256(); - let mut out = [0u8; 32]; - keccak256.update(input); - keccak256.finalize(&mut out); - out -} - -fn random_word(alphabet: &[u8], min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { - assert!(min_count + diff_count <= 32); - *seed = H256(keccak256(seed.as_bytes())); - let r = min_count + (seed[31] as usize % (diff_count + 1)); - let mut ret: Vec = Vec::with_capacity(r); - for i in 0..r { - ret.push(alphabet[seed[i] as usize % alphabet.len()]); - } - ret -} - -fn random_bytes(min_count: usize, diff_count: usize, seed: &mut H256) -> Vec { - assert!(min_count + diff_count <= 32); - *seed = H256(keccak256(seed.as_bytes())); - let r = min_count + (seed[31] as usize % (diff_count + 1)); - seed[0..r].to_vec() -} - -fn random_value(seed: &mut H256) -> Vec { - *seed = H256(keccak256(seed.as_bytes())); - match seed[0] % 2 { - 1 => vec![seed[31]; 1], - _ => seed.as_bytes().to_vec(), - } -} - -fn bench_insertions(c: &mut Criterion) { - c.bench_function("32_mir_1k", |b| { - let st = StandardMap { - alphabet: Alphabet::All, - min_key: 32, - journal_key: 0, - value_mode: ValueMode::Mirror, - count: 1000, - }; - let d = st.make(); - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("32_ran_1k", |b| { - let st = StandardMap { - alphabet: Alphabet::All, - min_key: 32, - journal_key: 0, - value_mode: ValueMode::Random, - count: 1000, - }; - let d = st.make(); - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("six_high", |b| { - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_bytes(6, 0, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - 
b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("six_mid", |b| { - let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_"; - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_word(alphabet, 6, 0, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("random_mid", |b| { - let alphabet = b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_"; - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_word(alphabet, 1, 5, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); - - c.bench_function("six_low", |b| { - let alphabet = b"abcdef"; - let mut d: Vec<(Vec, Vec)> = Vec::new(); - let mut seed = H256::default(); - for _ in 0..1000 { - let k = random_word(alphabet, 6, 0, &mut seed); - let v = random_value(&mut seed); - d.push((k, v)) - } - b.iter(|| trie_root::(d.clone())); - }); -} - -criterion_group!(benches, bench_insertions); -criterion_main!(benches); diff --git a/triehash/src/lib.rs b/triehash/src/lib.rs deleted file mode 100644 index 63f93a5f6..000000000 --- a/triehash/src/lib.rs +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Generetes trie root. -//! -//! This module should be used to generate trie root hash. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(not(feature = "std"))] -extern crate alloc; - -#[cfg(feature = "std")] -mod rstd { - pub use std::collections::BTreeMap; -} - -#[cfg(not(feature = "std"))] -mod rstd { - pub use alloc::collections::BTreeMap; - pub use alloc::vec::Vec; -} - -use core::cmp; -use core::iter::once; -use rstd::*; - -use hash_db::Hasher; -use rlp::RlpStream; - -fn shared_prefix_len(first: &[T], second: &[T]) -> usize { - first.iter().zip(second.iter()).position(|(f, s)| f != s).unwrap_or_else(|| cmp::min(first.len(), second.len())) -} - -/// Generates a trie root hash for a vector of values -/// -/// ``` -/// use hex_literal::hex; -/// use ethereum_types::H256; -/// use triehash::ordered_trie_root; -/// use keccak_hasher::KeccakHasher; -/// -/// let v = &["doe", "reindeer"]; -/// let root = H256::from(hex!("e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3")); -/// assert_eq!(ordered_trie_root::(v), root.as_ref()); -/// ``` -pub fn ordered_trie_root(input: I) -> H::Out -where - I: IntoIterator, - I::Item: AsRef<[u8]>, - H: Hasher, - ::Out: cmp::Ord, -{ - trie_root::(input.into_iter().enumerate().map(|(i, v)| (rlp::encode(&i), v))) -} - -/// Generates a trie root hash for a vector of key-value tuples -/// -/// ``` -/// use hex_literal::hex; -/// use triehash::trie_root; -/// use ethereum_types::H256; -/// use keccak_hasher::KeccakHasher; -/// -/// let v = vec![ -/// ("doe", "reindeer"), -/// ("dog", "puppy"), -/// ("dogglesworth", "cat"), -/// ]; -/// -/// let root = H256::from(hex!("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")); -/// assert_eq!(trie_root::(v), root.as_ref()); -/// ``` -pub fn trie_root(input: I) -> H::Out -where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - H: Hasher, - ::Out: cmp::Ord, -{ - // first put elements into btree to sort them and to remove duplicates - let input = input.into_iter().collect::>(); - - let mut nibbles = 
Vec::with_capacity(input.keys().map(|k| k.as_ref().len()).sum::() * 2); - let mut lens = Vec::with_capacity(input.len() + 1); - lens.push(0); - for k in input.keys() { - for &b in k.as_ref() { - nibbles.push(b >> 4); - nibbles.push(b & 0x0F); - } - lens.push(nibbles.len()); - } - - // then move them to a vector - let input = input.into_iter().zip(lens.windows(2)).map(|((_, v), w)| (&nibbles[w[0]..w[1]], v)).collect::>(); - - let mut stream = RlpStream::new(); - hash256rlp::(&input, 0, &mut stream); - H::hash(&stream.out()) -} - -/// Generates a key-hashed (secure) trie root hash for a vector of key-value tuples. -/// -/// ``` -/// use hex_literal::hex; -/// use ethereum_types::H256; -/// use triehash::sec_trie_root; -/// use keccak_hasher::KeccakHasher; -/// -/// let v = vec![ -/// ("doe", "reindeer"), -/// ("dog", "puppy"), -/// ("dogglesworth", "cat"), -/// ]; -/// -/// let root = H256::from(hex!("d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585")); -/// assert_eq!(sec_trie_root::(v), root.as_ref()); -/// ``` -pub fn sec_trie_root(input: I) -> H::Out -where - I: IntoIterator, - A: AsRef<[u8]>, - B: AsRef<[u8]>, - H: Hasher, - ::Out: cmp::Ord, -{ - trie_root::(input.into_iter().map(|(k, v)| (H::hash(k.as_ref()), v))) -} - -/// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1. -/// -/// The "termination marker" and "leaf-node" specifier are completely equivalent. -/// -/// Input values are in range `[0, 0xf]`. 
-/// -/// ```markdown -/// [0,0,1,2,3,4,5] 0x10012345 // 7 > 4 -/// [0,1,2,3,4,5] 0x00012345 // 6 > 4 -/// [1,2,3,4,5] 0x112345 // 5 > 3 -/// [0,0,1,2,3,4] 0x00001234 // 6 > 3 -/// [0,1,2,3,4] 0x101234 // 5 > 3 -/// [1,2,3,4] 0x001234 // 4 > 3 -/// [0,0,1,2,3,4,5,T] 0x30012345 // 7 > 4 -/// [0,0,1,2,3,4,T] 0x20001234 // 6 > 4 -/// [0,1,2,3,4,5,T] 0x20012345 // 6 > 4 -/// [1,2,3,4,5,T] 0x312345 // 5 > 3 -/// [1,2,3,4,T] 0x201234 // 4 > 3 -/// ``` -fn hex_prefix_encode<'a>(nibbles: &'a [u8], leaf: bool) -> impl Iterator + 'a { - let inlen = nibbles.len(); - let oddness_factor = inlen % 2; - - let first_byte = { - let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4; - if oddness_factor == 1 { - bits += nibbles[0]; - } - bits - }; - once(first_byte).chain(nibbles[oddness_factor..].chunks(2).map(|ch| ch[0] << 4 | ch[1])) -} - -fn hash256rlp(input: &[(A, B)], pre_len: usize, stream: &mut RlpStream) -where - A: AsRef<[u8]>, - B: AsRef<[u8]>, - H: Hasher, -{ - let inlen = input.len(); - - // in case of empty slice, just append empty data - if inlen == 0 { - stream.append_empty_data(); - return; - } - - // take slices - let key: &[u8] = &input[0].0.as_ref(); - let value: &[u8] = &input[0].1.as_ref(); - - // if the slice contains just one item, append the suffix of the key - // and then append value - if inlen == 1 { - stream.begin_list(2); - stream.append_iter(hex_prefix_encode(&key[pre_len..], true)); - stream.append(&value); - return; - } - - // get length of the longest shared prefix in slice keys - let shared_prefix = input - .iter() - // skip first tuple - .skip(1) - // get minimum number of shared nibbles between first and each successive - .fold(key.len(), |acc, &(ref k, _)| cmp::min(shared_prefix_len(key, k.as_ref()), acc)); - - // if shared prefix is higher than current prefix append its - // new part of the key to the stream - // then recursively append suffixes of all items who had this key - if shared_prefix > pre_len { - stream.begin_list(2); - 
stream.append_iter(hex_prefix_encode(&key[pre_len..shared_prefix], false)); - hash256aux::(input, shared_prefix, stream); - return; - } - - // an item for every possible nibble/suffix - // + 1 for data - stream.begin_list(17); - - // if first key len is equal to prefix_len, move to next element - let mut begin = if pre_len == key.len() { 1 } else { 0 }; - - // iterate over all possible nibbles - for i in 0..16 { - // count how many successive elements have same next nibble - let len = input.iter().skip(begin).take_while(|pair| pair.0.as_ref()[pre_len] == i).count(); - - // if at least 1 successive element has the same nibble - // append their suffixes - match len { - 0 => { - stream.append_empty_data(); - } - _ => hash256aux::(&input[begin..(begin + len)], pre_len + 1, stream), - } - begin += len; - } - - // if fist key len is equal prefix, append its value - if pre_len == key.len() { - stream.append(&value); - } else { - stream.append_empty_data(); - } -} - -fn hash256aux(input: &[(A, B)], pre_len: usize, stream: &mut RlpStream) -where - A: AsRef<[u8]>, - B: AsRef<[u8]>, - H: Hasher, -{ - let mut s = RlpStream::new(); - hash256rlp::(input, pre_len, &mut s); - let out = s.out(); - match out.len() { - 0..=31 => stream.append_raw(&out, 1), - _ => stream.append(&H::hash(&out).as_ref()), - }; -} - -#[cfg(test)] -mod tests { - use super::*; - #[cfg(not(feature = "std"))] - use alloc::vec; - use ethereum_types::H256; - use hex_literal::hex; - use keccak_hasher::KeccakHasher; - - #[test] - fn test_hex_prefix_encode() { - let v = vec![0, 0, 1, 2, 3, 4, 5]; - let e = vec![0x10, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, false).collect::>(); - assert_eq!(h, e); - - let v = vec![0, 1, 2, 3, 4, 5]; - let e = vec![0x00, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, false).collect::>(); - assert_eq!(h, e); - - let v = vec![0, 1, 2, 3, 4, 5]; - let e = vec![0x20, 0x01, 0x23, 0x45]; - let h = hex_prefix_encode(&v, true).collect::>(); - assert_eq!(h, e); - - let v = 
vec![1, 2, 3, 4, 5]; - let e = vec![0x31, 0x23, 0x45]; - let h = hex_prefix_encode(&v, true).collect::>(); - assert_eq!(h, e); - - let v = vec![1, 2, 3, 4]; - let e = vec![0x00, 0x12, 0x34]; - let h = hex_prefix_encode(&v, false).collect::>(); - assert_eq!(h, e); - - let v = vec![4, 1]; - let e = vec![0x20, 0x41]; - let h = hex_prefix_encode(&v, true).collect::>(); - assert_eq!(h, e); - } - - #[test] - fn simple_test() { - assert_eq!( - trie_root::(vec![( - b"A", - b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as &[u8] - )]), - H256::from(hex!("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")).as_ref(), - ); - } - - #[test] - fn test_triehash_out_of_order() { - assert_eq!( - trie_root::(vec![ - (vec![0x01u8, 0x23], vec![0x01u8, 0x23]), - (vec![0x81u8, 0x23], vec![0x81u8, 0x23]), - (vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]), - ]), - trie_root::(vec![ - (vec![0x01u8, 0x23], vec![0x01u8, 0x23]), - (vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]), // last two tuples are swapped - (vec![0x81u8, 0x23], vec![0x81u8, 0x23]), - ]), - ); - } - - #[test] - fn test_shared_prefix() { - let a = vec![1, 2, 3, 4, 5, 6]; - let b = vec![4, 2, 3, 4, 5, 6]; - assert_eq!(shared_prefix_len(&a, &b), 0); - } - - #[test] - fn test_shared_prefix2() { - let a = vec![1, 2, 3, 3, 5]; - let b = vec![1, 2, 3]; - assert_eq!(shared_prefix_len(&a, &b), 3); - } - - #[test] - fn test_shared_prefix3() { - let a = vec![1, 2, 3, 4, 5, 6]; - let b = vec![1, 2, 3, 4, 5, 6]; - assert_eq!(shared_prefix_len(&a, &b), 6); - } -} From a05ef25d8d7bfe636f06fdc60fadf2a9c221fb98 Mon Sep 17 00:00:00 2001 From: Igor Matuszewski Date: Fri, 2 Jul 2021 21:02:12 +0200 Subject: [PATCH 226/359] primitive-types/impl-codec: Bump to include new `parity_scale_codec::MaxEncodedLen` impls (#552) * impl-codec: Use new parity-scale-codec 2.2.0 with `max-encoded-len` feature * impl-codec: Bump to parity-scale-codec is not breaking * Bump primitive-types to 0.10.1 in order to bump impl-codec to 0.5.1 --- 
primitive-types/CHANGELOG.md | 5 +++++ primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/codec/CHANGELOG.md | 4 ++++ primitive-types/impls/codec/Cargo.toml | 4 ++-- primitive-types/impls/codec/src/lib.rs | 12 ++++++++++++ 5 files changed, 25 insertions(+), 4 deletions(-) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 520cc9d43..39c1a4e52 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.10.1] - 2021-07-02 +### Added +- Implemented `parity_scale_codec::MaxEncodedLen` trait for `{U128, U256, U512}` and `{H128, H160, H256, H512}` types. + +## [0.10.0] - 2021-07-02 ### Added - Added `U128::full_mul` method. [#546](https://github.com/paritytech/parity-common/pull/546) ### Breaking diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 030db9470..7a0b9d9e5 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.10.0" +version = "0.10.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,7 +11,7 @@ edition = "2018" fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } uint = { version = "0.9.0", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } -impl-codec = { version = "0.5.0", path = "impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.5.1", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } scale-info-crate = { package = "scale-info", version = ">=0.9, <2", features = 
["derive"], default-features = false, optional = true } diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md index 179be164f..c7fca7d25 100644 --- a/primitive-types/impls/codec/CHANGELOG.md +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.5.1] - 2021-07-02 +### Dependencies +- Updated `parity-scale-codec` to 2.2. [#552](https://github.com/paritytech/parity-common/pull/552) + ## [0.5.0] - 2021-01-27 ### Breaking - Updated `parity-scale-codec` to 2.0. [#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 27a4aa7b6..8721e4333 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-codec" -version = "0.5.0" +version = "0.5.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -8,7 +8,7 @@ description = "Parity Codec serialization support for uint and fixed hash." edition = "2018" [dependencies] -parity-scale-codec = { version = "2.0.0", default-features = false } +parity-scale-codec = { version = "2.2.0", default-features = false, features = ["max-encoded-len"] } [features] default = ["std"] diff --git a/primitive-types/impls/codec/src/lib.rs b/primitive-types/impls/codec/src/lib.rs index 1a4f2e252..feacec08a 100644 --- a/primitive-types/impls/codec/src/lib.rs +++ b/primitive-types/impls/codec/src/lib.rs @@ -32,6 +32,12 @@ macro_rules! impl_uint_codec { <[u8; $len * 8] as $crate::codec::Decode>::decode(input).map(|b| $name::from_little_endian(&b)) } } + + impl $crate::codec::MaxEncodedLen for $name { + fn max_encoded_len() -> usize { + ::core::mem::size_of::<$name>() + } + } }; } @@ -52,5 +58,11 @@ macro_rules! 
impl_fixed_hash_codec { <[u8; $len] as $crate::codec::Decode>::decode(input).map($name) } } + + impl $crate::codec::MaxEncodedLen for $name { + fn max_encoded_len() -> usize { + ::core::mem::size_of::<$name>() + } + } }; } From 2d571df7fee92b85b47b49cf14aa3a7641f2f3b9 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 5 Jul 2021 14:57:56 +0200 Subject: [PATCH 227/359] decommission parity-crypto (#566) * decommission parity-crypto * fix CI --- .github/workflows/ci.yml | 6 - Cargo.toml | 1 - parity-crypto/CHANGELOG.md | 42 -- parity-crypto/Cargo.toml | 42 -- parity-crypto/README.md | 8 - parity-crypto/benches/bench.rs | 51 -- parity-crypto/src/aes.rs | 188 ------ parity-crypto/src/digest.rs | 109 ---- parity-crypto/src/error.rs | 142 ----- parity-crypto/src/hmac/mod.rs | 161 ------ parity-crypto/src/hmac/test.rs | 234 -------- parity-crypto/src/lib.rs | 82 --- parity-crypto/src/pbkdf2/mod.rs | 21 - parity-crypto/src/pbkdf2/test.rs | 22 - parity-crypto/src/publickey/ec_math_utils.rs | 149 ----- parity-crypto/src/publickey/ecdh.rs | 45 -- .../src/publickey/ecdsa_signature.rs | 331 ----------- parity-crypto/src/publickey/ecies.rs | 130 ----- parity-crypto/src/publickey/error.rs | 91 --- parity-crypto/src/publickey/extended_keys.rs | 542 ------------------ parity-crypto/src/publickey/keypair.rs | 109 ---- .../src/publickey/keypair_generator.rs | 22 - parity-crypto/src/publickey/mod.rs | 47 -- parity-crypto/src/publickey/secret_key.rs | 307 ---------- parity-crypto/src/scrypt.rs | 56 -- 25 files changed, 2938 deletions(-) delete mode 100644 parity-crypto/CHANGELOG.md delete mode 100644 parity-crypto/Cargo.toml delete mode 100644 parity-crypto/README.md delete mode 100644 parity-crypto/benches/bench.rs delete mode 100644 parity-crypto/src/aes.rs delete mode 100644 parity-crypto/src/digest.rs delete mode 100644 parity-crypto/src/error.rs delete mode 100644 parity-crypto/src/hmac/mod.rs delete mode 100644 parity-crypto/src/hmac/test.rs delete mode 100644 
parity-crypto/src/lib.rs delete mode 100644 parity-crypto/src/pbkdf2/mod.rs delete mode 100644 parity-crypto/src/pbkdf2/test.rs delete mode 100644 parity-crypto/src/publickey/ec_math_utils.rs delete mode 100644 parity-crypto/src/publickey/ecdh.rs delete mode 100644 parity-crypto/src/publickey/ecdsa_signature.rs delete mode 100644 parity-crypto/src/publickey/ecies.rs delete mode 100644 parity-crypto/src/publickey/error.rs delete mode 100644 parity-crypto/src/publickey/extended_keys.rs delete mode 100644 parity-crypto/src/publickey/keypair.rs delete mode 100644 parity-crypto/src/publickey/keypair_generator.rs delete mode 100644 parity-crypto/src/publickey/mod.rs delete mode 100644 parity-crypto/src/publickey/secret_key.rs delete mode 100644 parity-crypto/src/scrypt.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 115bec02f..84a12b31c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -98,12 +98,6 @@ jobs: command: test args: -p ethbloom --all-features - - name: Test parity-crypto all-features - uses: actions-rs/cargo@v1 - with: - command: test - args: -p parity-crypto --all-features - - name: Test uint on bigendian if: runner.os == 'Linux' uses: actions-rs/cargo@v1 diff --git a/Cargo.toml b/Cargo.toml index 91bb0fee1..bb9fac368 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,6 @@ members = [ "kvdb-rocksdb", "kvdb-shared-tests", "parity-bytes", - "parity-crypto", "rlp", "rlp-derive", "uint", diff --git a/parity-crypto/CHANGELOG.md b/parity-crypto/CHANGELOG.md deleted file mode 100644 index af033f2a0..000000000 --- a/parity-crypto/CHANGELOG.md +++ /dev/null @@ -1,42 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -### Breaking -- Updated `ethereum-types` to 0.12. [#556](https://github.com/paritytech/parity-common/pull/556) - -## [0.8.0] - 2021-01-27 -### Breaking -- Updated `ethereum-types` to 0.11. 
[#510](https://github.com/paritytech/parity-common/pull/510) - -## [0.7.0] - 2021-01-05 -### Breaking -- Bump `rust-secp256k1` to v0.19, always allow zero signatures. [#438](https://github.com/paritytech/parity-common/pull/438) -- Updated `rlp` to 0.5. [#463](https://github.com/paritytech/parity-common/pull/463) -- Updated dependencies. [#483](https://github.com/paritytech/parity-common/pull/483) -- Remove deprecated trait impls `FromStr`/`TryFrom` for `Secret` [#495](https://github.com/paritytech/parity-common/pull/495) - -## [0.6.2] - 2020-06-19 -- Put `Secret` memory on heap. [#400](https://github.com/paritytech/parity-common/pull/400) -- Add `copy_from_str` conversion methods for `Secret`. -- Deprecate `From<&str>` in favor of `copy_from_str`. - -## [0.6.1] - 2020-04-11 -- Add `recover_allowing_all_zero_message()` and `ZeroesAllowedMessage` to accomodate ethereum's `ecrecover` builtin. [#369](https://github.com/paritytech/parity-common/pull/369) - -## [0.6.0] - 2020-03-16 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) -- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) - -## [0.5.0] - 2020-02-08 -- Remove `inv()` from `SecretKey` (breaking). [#258](https://github.com/paritytech/parity-common/pull/258) -- `Generate::generate()` does not return error. [#258](https://github.com/paritytech/parity-common/pull/258) -- `Secp256k1` is no longer exported. [#258](https://github.com/paritytech/parity-common/pull/258) -- Remove `public_is_valid()` as it is now impossible to create invalid public keys. [#258](https://github.com/paritytech/parity-common/pull/258) -- 0-valued `Secp::Message`s are disallowed (signatures on them are forgeable for all keys). [#258](https://github.com/paritytech/parity-common/pull/258) -- Switch to upstream `rust-secp256k1` at v0.17.2. [#258](https://github.com/paritytech/parity-common/pull/258) -- make `rustc_hex` dependency optional. 
[#337](https://github.com/paritytech/parity-common/pull/337) diff --git a/parity-crypto/Cargo.toml b/parity-crypto/Cargo.toml deleted file mode 100644 index 851ad0248..000000000 --- a/parity-crypto/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "parity-crypto" -version = "0.9.0" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "Crypto utils used by ethstore and network." -license = "MIT OR Apache-2.0" -autobenches = false -edition = "2018" - -[[bench]] -name = "bench" -harness = false -required-features = ["publickey"] - -[dependencies] -aes = "0.6.0" -aes-ctr = "0.6.0" -block-modes = "0.7.0" -digest = "0.9.0" -ethereum-types = { version = "0.12.0", optional = true, path = "../ethereum-types" } -hmac = "0.10.1" -lazy_static = { version = "1.4.0", optional = true } -pbkdf2 = "0.7.3" -ripemd160 = "0.9.1" -rustc-hex = { version = "2.1.0", default-features = false, optional = true } -scrypt = { version = "0.5.0" } -secp256k1 = { version = "0.20.0", optional = true, features = ["global-context", "recovery", "rand-std"] } -sha2 = "0.9.2" -subtle = "2.4.0" -tiny-keccak = { version = "2.0.2", features = ["keccak"] } -zeroize = { version = "1.2.0", default-features = false } - -[dev-dependencies] -criterion = "0.3.3" -hex-literal = "0.3.1" - -[features] -default = [] -# public key crypto utils -# moved from ethkey module in parity ethereum repository -publickey = ["secp256k1", "lazy_static", "ethereum-types", "rustc-hex"] diff --git a/parity-crypto/README.md b/parity-crypto/README.md deleted file mode 100644 index b7309710c..000000000 --- a/parity-crypto/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# parity-crypto - -General cryptographic utilities for Ethereum. - - -## Changelog - -The 0.4 release removes the dependency on `ring` and replaces it with pure rust alternatives. As a consequence of this, AES GCM support has been removed. 
`subtle` is used for constant time equality testing and error handling is pared down to the bare minimum required. diff --git a/parity-crypto/benches/bench.rs b/parity-crypto/benches/bench.rs deleted file mode 100644 index 562c50148..000000000 --- a/parity-crypto/benches/bench.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use crate::parity_crypto::publickey::Generator; -use criterion::{criterion_group, criterion_main, Bencher, Criterion}; - -criterion_group!(benches, input_len, ecdh_agree,); - -criterion_main!(benches); - -/// general benches for multiple input size -fn input_len(c: &mut Criterion) { - c.bench_function_over_inputs( - "ripemd", - |b: &mut Bencher, size: &usize| { - let data = vec![0u8; *size]; - b.iter(|| parity_crypto::digest::ripemd160(&data[..])); - }, - vec![100, 500, 1_000, 10_000, 100_000], - ); - - c.bench_function_over_inputs( - "aes_ctr", - |b: &mut Bencher, size: &usize| { - let data = vec![0u8; *size]; - let mut dest = vec![0; *size]; - let k = [0; 16]; - let iv = [0; 16]; - - b.iter(|| { - parity_crypto::aes::encrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); - // same as encrypt but add it just in case - parity_crypto::aes::decrypt_128_ctr(&k[..], &iv[..], &data[..], &mut dest[..]).unwrap(); - }); - }, - vec![100, 500, 1_000, 10_000, 100_000], - ); -} - -fn ecdh_agree(c: &mut Criterion) { - let keypair = parity_crypto::publickey::Random.generate().unwrap(); - let public = keypair.public().clone(); - let secret = keypair.secret().clone(); - - c.bench_function("ecdh_agree", move |b| b.iter(|| parity_crypto::publickey::ecdh::agree(&secret, &public))); -} diff --git a/parity-crypto/src/aes.rs b/parity-crypto/src/aes.rs deleted file mode 100644 index c7c860183..000000000 --- a/parity-crypto/src/aes.rs +++ 
/dev/null @@ -1,188 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use aes::cipher::generic_array::GenericArray; -use aes::{Aes128, Aes256}; -use aes_ctr::cipher::stream::{NewStreamCipher, SyncStreamCipher}; -use block_modes::{ - block_padding::{Pkcs7, ZeroPadding}, - BlockMode, Cbc, Ecb, -}; - -use crate::error::SymmError; - -/// One time encoder/decoder for Ecb mode Aes256 with zero padding -pub struct AesEcb256(Ecb); - -impl AesEcb256 { - /// New encoder/decoder, no iv for ecb - pub fn new(key: &[u8]) -> Result { - Ok(AesEcb256(Ecb::new_var(key, &[])?)) - } - - /// Encrypt data in place without padding. The data length must be a multiple - /// of the block size. - pub fn encrypt(self, content: &mut [u8]) -> Result<(), SymmError> { - let len = content.len(); - self.0.encrypt(content, len)?; - Ok(()) - } - - /// Decrypt data in place without padding. The data length must be a multiple - /// of the block size. - pub fn decrypt(self, content: &mut [u8]) -> Result<(), SymmError> { - self.0.decrypt(content)?; - Ok(()) - } -} - -/// Reusable encoder/decoder for Aes256 in Ctr mode and no padding -pub struct AesCtr256(aes_ctr::Aes256Ctr); - -impl AesCtr256 { - /// New encoder/decoder - pub fn new(key: &[u8], iv: &[u8]) -> Result { - Ok(AesCtr256(aes_ctr::Aes256Ctr::new(GenericArray::from_slice(key), GenericArray::from_slice(iv)))) - } - - /// In place encrypt a content without padding, the content length must be a multiple - /// of the block size. - pub fn encrypt(&mut self, content: &mut [u8]) -> Result<(), SymmError> { - self.0.try_apply_keystream(content)?; - Ok(()) - } - - /// In place decrypt a content without padding, the content length must be a multiple - /// of the block size. 
- pub fn decrypt(&mut self, content: &mut [u8]) -> Result<(), SymmError> { - self.0.try_apply_keystream(content)?; - Ok(()) - } -} - -/// Encrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -/// If possible prefer `inplace_encrypt_128_ctr` to avoid a slice copy. -pub fn encrypt_128_ctr(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); - &mut dest[..plain.len()].copy_from_slice(plain); - encryptor.try_apply_keystream(dest)?; - Ok(()) -} - -/// Encrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -pub fn inplace_encrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); - encryptor.try_apply_keystream(data)?; - Ok(()) -} - -/// Decrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -/// If possible prefer `inplace_decrypt_128_ctr` instead. -pub fn decrypt_128_ctr(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); - - &mut dest[..encrypted.len()].copy_from_slice(encrypted); - encryptor.try_apply_keystream(dest)?; - Ok(()) -} - -/// Decrypt a message (CTR mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. 
-pub fn inplace_decrypt_128_ctr(k: &[u8], iv: &[u8], data: &mut [u8]) -> Result<(), SymmError> { - let mut encryptor = aes_ctr::Aes128Ctr::new(GenericArray::from_slice(k), GenericArray::from_slice(iv)); - - encryptor.try_apply_keystream(data)?; - Ok(()) -} - -/// Decrypt a message (CBC mode). -/// -/// Key (`k`) length and initialisation vector (`iv`) length have to be 16 bytes each. -/// An error is returned if the input lengths are invalid. -pub fn decrypt_128_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result { - let encryptor = Cbc::::new_var(k, iv)?; - &mut dest[..encrypted.len()].copy_from_slice(encrypted); - let unpad_length = { encryptor.decrypt(&mut dest[..encrypted.len()])?.len() }; - Ok(unpad_length) -} - -#[cfg(test)] -mod tests { - - use super::*; - - // only use for test could be expose in the future - fn encrypt_128_cbc(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) -> Result<(), SymmError> { - let encryptor = Cbc::::new_var(k, iv)?; - &mut dest[..plain.len()].copy_from_slice(plain); - encryptor.encrypt(dest, plain.len())?; - Ok(()) - } - - #[test] - pub fn test_aes_short() -> Result<(), SymmError> { - let key = [ - 97, 110, 121, 99, 111, 110, 116, 101, 110, 116, 116, 111, 114, 101, 97, 99, 104, 49, 50, 56, 98, 105, 116, - 115, 105, 122, 101, 10, - ]; - let salt = [ - 109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, 108, 108, 115, 111, 109, 109, 101, 98, - 121, 116, 101, 108, 101, 110, 103, 116, 104, 10, - ]; - let content = [ - 83, 111, 109, 101, 32, 99, 111, 110, 116, 101, 110, 116, 32, 116, 111, 32, 116, 101, 115, 116, 32, 97, 101, - 115, 44, 10, 110, 111, 116, 32, 116, 111, 32, 109, 117, 99, 104, 32, 44, 32, 111, 110, 108, 121, 32, 118, - 101, 114, 121, 32, 98, 97, 115, 105, 99, 32, 116, 101, 115, 116, 32, 116, 111, 32, 97, 118, 111, 105, 100, - 32, 111, 98, 118, 105, 111, 117, 115, 32, 114, 101, 103, 114, 101, 115, 115, 105, 111, 110, 32, 119, 104, - 101, 110, 32, 115, 119, 105, 116, 99, 104, 105, 110, 
103, 32, 108, 105, 98, 115, 46, 10, - ]; - let ctr_enc = [ - 65, 55, 246, 75, 24, 117, 30, 233, 218, 139, 91, 251, 251, 179, 171, 69, 60, 244, 249, 44, 238, 60, 10, 66, - 71, 10, 199, 111, 54, 24, 124, 223, 153, 250, 159, 154, 164, 109, 232, 82, 20, 199, 182, 40, 174, 104, 64, - 203, 236, 94, 222, 184, 117, 54, 234, 189, 253, 122, 135, 121, 100, 44, 227, 241, 123, 120, 110, 188, 109, - 148, 112, 160, 131, 205, 116, 104, 232, 8, 22, 170, 80, 231, 155, 246, 255, 115, 101, 5, 234, 104, 220, - 199, 192, 166, 181, 156, 113, 255, 187, 51, 38, 128, 75, 29, 237, 178, 205, 98, 101, 110, - ]; - let cbc_enc = [ - 167, 248, 5, 90, 11, 140, 215, 138, 165, 125, 137, 76, 47, 243, 191, 48, 183, 247, 109, 86, 24, 45, 81, - 215, 0, 51, 221, 185, 131, 97, 234, 189, 244, 255, 107, 210, 70, 60, 41, 221, 43, 137, 185, 166, 42, 65, - 18, 200, 151, 233, 255, 192, 109, 25, 105, 115, 161, 209, 126, 235, 99, 192, 241, 241, 19, 249, 87, 244, - 28, 146, 186, 189, 108, 9, 243, 132, 4, 105, 53, 162, 8, 235, 84, 107, 213, 59, 158, 113, 227, 120, 162, - 50, 237, 123, 70, 187, 83, 73, 146, 13, 44, 191, 53, 4, 125, 207, 176, 45, 8, 153, 175, 198, - ]; - let mut dest = vec![0; 110]; - let mut dest_padded = vec![0; 112]; - let mut dest_padded2 = vec![0; 128]; // TODO RustLib need an extra 16bytes in dest : looks extra buggy but function is not currently use (keep it private for now) - encrypt_128_cbc(&key[..16], &salt[..16], &content, &mut dest_padded2)?; - assert!(&dest_padded2[..112] == &cbc_enc[..]); - encrypt_128_ctr(&key[..16], &salt[..16], &content, &mut dest)?; - assert!(&dest[..] == &ctr_enc[..]); - let mut content_data = content.to_vec(); - inplace_encrypt_128_ctr(&key[..16], &salt[..16], &mut content_data[..])?; - assert!(&content_data[..] == &ctr_enc[..]); - decrypt_128_ctr(&key[..16], &salt[..16], &ctr_enc[..], &mut dest)?; - assert!(&dest[..] 
== &content[..]); - let mut content_data = ctr_enc.to_vec(); - inplace_decrypt_128_ctr(&key[..16], &salt[..16], &mut content_data[..])?; - assert!(&content_data[..] == &content[..]); - let l = decrypt_128_cbc(&key[..16], &salt[..16], &cbc_enc[..], &mut dest_padded)?; - assert!(&dest_padded[..l] == &content[..]); - Ok(()) - } -} diff --git a/parity-crypto/src/digest.rs b/parity-crypto/src/digest.rs deleted file mode 100644 index 759613f2f..000000000 --- a/parity-crypto/src/digest.rs +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::marker::PhantomData; -use std::ops::Deref; - -use digest::generic_array::{ - typenum::{U20, U32, U64}, - GenericArray, -}; -use sha2::Digest as RDigest; - -/// The message digest. -pub struct Digest(InnerDigest, PhantomData); - -enum InnerDigest { - Sha256(GenericArray), - Sha512(GenericArray), - Ripemd160(GenericArray), -} - -impl Deref for Digest { - type Target = [u8]; - fn deref(&self) -> &Self::Target { - match self.0 { - InnerDigest::Sha256(ref d) => &d[..], - InnerDigest::Sha512(ref d) => &d[..], - InnerDigest::Ripemd160(ref d) => &d[..], - } - } -} - -/// Single-step sha256 digest computation. -pub fn sha256(data: &[u8]) -> Digest { - let mut hasher = Hasher::sha256(); - hasher.update(data); - hasher.finish() -} - -/// Single-step sha512 digest computation. -pub fn sha512(data: &[u8]) -> Digest { - let mut hasher = Hasher::sha512(); - hasher.update(data); - hasher.finish() -} - -/// Single-step ripemd160 digest computation. -pub fn ripemd160(data: &[u8]) -> Digest { - let mut hasher = Hasher::ripemd160(); - hasher.update(data); - hasher.finish() -} - -#[derive(Debug)] -pub enum Sha256 {} -#[derive(Debug)] -pub enum Sha512 {} -#[derive(Debug)] -pub enum Ripemd160 {} - -/// Stateful digest computation. 
-pub struct Hasher(Inner, PhantomData); - -enum Inner { - Sha256(sha2::Sha256), - Sha512(sha2::Sha512), - Ripemd160(ripemd160::Ripemd160), -} - -impl Hasher { - pub fn sha256() -> Hasher { - Hasher(Inner::Sha256(sha2::Sha256::default()), PhantomData) - } -} - -impl Hasher { - pub fn sha512() -> Hasher { - Hasher(Inner::Sha512(sha2::Sha512::default()), PhantomData) - } -} - -impl Hasher { - pub fn ripemd160() -> Hasher { - Hasher(Inner::Ripemd160(ripemd160::Ripemd160::default()), PhantomData) - } -} - -impl Hasher { - pub fn update(&mut self, data: &[u8]) { - match self.0 { - Inner::Sha256(ref mut ctx) => ctx.update(data), - Inner::Sha512(ref mut ctx) => ctx.update(data), - Inner::Ripemd160(ref mut ctx) => ctx.update(data), - } - } - - pub fn finish(self) -> Digest { - match self.0 { - Inner::Sha256(ctx) => Digest(InnerDigest::Sha256(ctx.finalize()), PhantomData), - Inner::Sha512(ctx) => Digest(InnerDigest::Sha512(ctx.finalize()), PhantomData), - Inner::Ripemd160(ctx) => Digest(InnerDigest::Ripemd160(ctx.finalize()), PhantomData), - } - } -} diff --git a/parity-crypto/src/error.rs b/parity-crypto/src/error.rs deleted file mode 100644 index 6f413a247..000000000 --- a/parity-crypto/src/error.rs +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::{error::Error as StdError, fmt, result}; - -#[derive(Debug)] -pub enum Error { - Scrypt(ScryptError), - Symm(SymmError), -} - -#[derive(Debug)] -pub enum ScryptError { - // log(N) < r / 16 - InvalidN, - // p <= (2^31-1 * 32)/(128 * r) - InvalidP, - ScryptParam(scrypt::errors::InvalidParams), - ScryptLength(scrypt::errors::InvalidOutputLen), -} - -#[derive(Debug)] -pub struct SymmError(PrivSymmErr); - -#[derive(Debug)] -enum PrivSymmErr { - BlockMode(block_modes::BlockModeError), - KeyStream(aes_ctr::cipher::stream::LoopError), - InvalidKeyLength(block_modes::InvalidKeyIvLength), -} - -impl StdError for Error { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - match self { - Error::Scrypt(scrypt_err) => Some(scrypt_err), - Error::Symm(symm_err) => Some(symm_err), - } - } -} - -impl StdError for ScryptError { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - match self { - ScryptError::ScryptParam(err) => Some(err), - ScryptError::ScryptLength(err) => Some(err), - _ => None, - } - } -} - -impl StdError for SymmError { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - match &self.0 { - PrivSymmErr::BlockMode(err) => Some(err), - PrivSymmErr::InvalidKeyLength(err) => Some(err), - _ => None, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { - match self { - Error::Scrypt(err) => write!(f, "scrypt error: {}", err), - Error::Symm(err) => write!(f, "symm error: {}", err), - } - } -} - -impl fmt::Display for ScryptError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { - match self { - ScryptError::InvalidN => write!(f, "invalid n argument"), - ScryptError::InvalidP => write!(f, "invalid p argument"), - ScryptError::ScryptParam(err) => write!(f, "invalid params: {}", err), - ScryptError::ScryptLength(err) => write!(f, "invalid output length: {}", err), - } - } -} - -impl fmt::Display for SymmError { - fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> result::Result<(), fmt::Error> { - match self { - SymmError(PrivSymmErr::BlockMode(err)) => write!(f, "block cipher error: {}", err), - SymmError(PrivSymmErr::KeyStream(err)) => write!(f, "ctr key stream ended: {}", err), - SymmError(PrivSymmErr::InvalidKeyLength(err)) => write!(f, "block cipher key length: {}", err), - } - } -} - -impl Into for Error { - fn into(self) -> std::io::Error { - std::io::Error::new(std::io::ErrorKind::Other, format!("Crypto error: {}", self)) - } -} - -impl From for SymmError { - fn from(e: block_modes::BlockModeError) -> SymmError { - SymmError(PrivSymmErr::BlockMode(e)) - } -} - -impl From for SymmError { - fn from(e: block_modes::InvalidKeyIvLength) -> SymmError { - SymmError(PrivSymmErr::InvalidKeyLength(e)) - } -} - -impl From for SymmError { - fn from(e: aes_ctr::cipher::stream::LoopError) -> SymmError { - SymmError(PrivSymmErr::KeyStream(e)) - } -} - -impl From for ScryptError { - fn from(e: scrypt::errors::InvalidParams) -> ScryptError { - ScryptError::ScryptParam(e) - } -} - -impl From for ScryptError { - fn from(e: scrypt::errors::InvalidOutputLen) -> ScryptError { - ScryptError::ScryptLength(e) - } -} - -impl From for Error { - fn from(e: ScryptError) -> Error { - Error::Scrypt(e) - } -} - -impl From for Error { - fn from(e: SymmError) -> Error { - Error::Symm(e) - } -} diff --git a/parity-crypto/src/hmac/mod.rs b/parity-crypto/src/hmac/mod.rs deleted file mode 100644 index ca20ae1c1..000000000 --- a/parity-crypto/src/hmac/mod.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::marker::PhantomData; -use std::ops::Deref; - -use digest::generic_array::{ - typenum::{U32, U64}, - GenericArray, -}; -use hmac::{Hmac, Mac as _, NewMac as _}; -use zeroize::Zeroize; - -use crate::digest::{Sha256, Sha512}; - -/// HMAC signature. -#[derive(Debug)] -pub struct Signature(HashInner, PhantomData); - -#[derive(Debug)] -enum HashInner { - Sha256(GenericArray), - Sha512(GenericArray), -} - -impl Deref for Signature { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - match &self.0 { - HashInner::Sha256(a) => a.as_slice(), - HashInner::Sha512(a) => a.as_slice(), - } - } -} - -/// HMAC signing key. -pub struct SigKey(KeyInner, PhantomData); - -#[derive(PartialEq)] -// Using `Box[u8]` guarantees no reallocation can happen -struct DisposableBox(Box<[u8]>); - -impl std::fmt::Debug for DisposableBox { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", &self.0.as_ref()) - } -} - -impl DisposableBox { - fn from_slice(data: &[u8]) -> Self { - Self(data.to_vec().into_boxed_slice()) - } -} - -impl Drop for DisposableBox { - fn drop(&mut self) { - self.0.zeroize() - } -} - -#[derive(Debug, PartialEq)] -enum KeyInner { - Sha256(DisposableBox), - Sha512(DisposableBox), -} - -impl SigKey { - pub fn sha256(key: &[u8]) -> SigKey { - SigKey(KeyInner::Sha256(DisposableBox::from_slice(key)), PhantomData) - } -} - -impl SigKey { - pub fn sha512(key: &[u8]) -> SigKey { - SigKey(KeyInner::Sha512(DisposableBox::from_slice(key)), PhantomData) - } -} - -/// Compute HMAC signature of `data`. -pub fn sign(k: &SigKey, data: &[u8]) -> Signature { - let mut signer = Signer::with(k); - signer.update(data); - signer.sign() -} - -/// Stateful HMAC computation. 
-pub struct Signer(SignerInner, PhantomData); - -enum SignerInner { - Sha256(Hmac), - Sha512(Hmac), -} - -impl Signer { - pub fn with(key: &SigKey) -> Signer { - match &key.0 { - KeyInner::Sha256(key_bytes) => Signer( - SignerInner::Sha256(Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed")), - PhantomData, - ), - KeyInner::Sha512(key_bytes) => Signer( - SignerInner::Sha512(Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed")), - PhantomData, - ), - } - } - - pub fn update(&mut self, data: &[u8]) { - match &mut self.0 { - SignerInner::Sha256(hmac) => hmac.update(data), - SignerInner::Sha512(hmac) => hmac.update(data), - } - } - - pub fn sign(self) -> Signature { - match self.0 { - SignerInner::Sha256(hmac) => Signature(HashInner::Sha256(hmac.finalize().into_bytes()), PhantomData), - SignerInner::Sha512(hmac) => Signature(HashInner::Sha512(hmac.finalize().into_bytes()), PhantomData), - } - } -} - -/// HMAC signature verification key. -pub struct VerifyKey(KeyInner, PhantomData); - -impl VerifyKey { - pub fn sha256(key: &[u8]) -> VerifyKey { - VerifyKey(KeyInner::Sha256(DisposableBox::from_slice(key)), PhantomData) - } -} - -impl VerifyKey { - pub fn sha512(key: &[u8]) -> VerifyKey { - VerifyKey(KeyInner::Sha512(DisposableBox::from_slice(key)), PhantomData) - } -} - -/// Verify HMAC signature of `data`. 
-pub fn verify(key: &VerifyKey, data: &[u8], sig: &[u8]) -> bool { - match &key.0 { - KeyInner::Sha256(key_bytes) => { - let mut ctx = Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed"); - ctx.update(data); - ctx.verify(sig).is_ok() - } - KeyInner::Sha512(key_bytes) => { - let mut ctx = Hmac::::new_varkey(&key_bytes.0).expect("always returns Ok; qed"); - ctx.update(data); - ctx.verify(sig).is_ok() - } - } -} - -#[cfg(test)] -mod test; diff --git a/parity-crypto/src/hmac/test.rs b/parity-crypto/src/hmac/test.rs deleted file mode 100644 index 8f71bd4a9..000000000 --- a/parity-crypto/src/hmac/test.rs +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::*; -use hex_literal::hex; - -#[test] -fn simple_mac_and_verify() { - let input = b"Some bytes"; - let big_input = vec![7u8; 2000]; - - let key1 = vec![3u8; 64]; - let key2 = vec![4u8; 128]; - - let sig_key1 = SigKey::sha256(&key1[..]); - let sig_key2 = SigKey::sha512(&key2[..]); - - let mut signer1 = Signer::with(&sig_key1); - let mut signer2 = Signer::with(&sig_key2); - - signer1.update(&input[..]); - for i in 0..big_input.len() / 33 { - signer2.update(&big_input[i * 33..(i + 1) * 33]); - } - signer2.update(&big_input[(big_input.len() / 33) * 33..]); - let sig1 = signer1.sign(); - assert_eq!( - &sig1[..], - [ - 223, 208, 90, 69, 144, 95, 145, 180, 56, 155, 78, 40, 86, 238, 205, 81, 160, 245, 88, 145, 164, 67, 254, - 180, 202, 107, 93, 249, 64, 196, 86, 225 - ] - ); - let sig2 = signer2.sign(); - assert_eq!( - &sig2[..], - &[ - 29, 63, 46, 122, 27, 5, 241, 38, 86, 197, 91, 79, 33, 107, 152, 195, 118, 221, 117, 119, 84, 114, 46, 65, - 243, 157, 105, 12, 147, 176, 190, 37, 210, 164, 152, 8, 58, 243, 59, 206, 80, 10, 230, 197, 255, 110, 191, - 180, 93, 22, 255, 0, 99, 79, 237, 229, 209, 
199, 125, 83, 15, 179, 134, 89 - ][..] - ); - assert_eq!(&sig1[..], &sign(&sig_key1, &input[..])[..]); - assert_eq!(&sig2[..], &sign(&sig_key2, &big_input[..])[..]); - let verif_key1 = VerifyKey::sha256(&key1[..]); - let verif_key2 = VerifyKey::sha512(&key2[..]); - assert!(verify(&verif_key1, &input[..], &sig1[..])); - assert!(verify(&verif_key2, &big_input[..], &sig2[..])); -} - -fn check_test_vector(key: &[u8], data: &[u8], expected_256: &[u8], expected_512: &[u8]) { - // Sha-256 - let sig_key = SigKey::sha256(&key); - let mut signer = Signer::with(&sig_key); - signer.update(&data); - let signature = signer.sign(); - assert_eq!(&signature[..], expected_256); - assert_eq!(&signature[..], &sign(&sig_key, data)[..]); - let ver_key = VerifyKey::sha256(&key); - assert!(verify(&ver_key, data, &signature)); - - // Sha-512 - let sig_key = SigKey::sha512(&key); - let mut signer = Signer::with(&sig_key); - signer.update(&data); - let signature = signer.sign(); - assert_eq!(&signature[..], expected_512); - assert_eq!(&signature[..], &sign(&sig_key, data)[..]); - let ver_key = VerifyKey::sha512(&key); - assert!(verify(&ver_key, data, &signature)); -} - -#[test] -fn ietf_test_vectors() { - // Test vectors from https://tools.ietf.org/html/rfc4231.html#section-4 - - // Test Case 1 - check_test_vector( - &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"), - &hex!("4869205468657265"), - &hex!( - " - b0344c61d8db38535ca8afceaf0bf12b - 881dc200c9833da726e9376c2e32cff7" - ), - &hex!( - " - 87aa7cdea5ef619d4ff0b4241a1d6cb0 - 2379f4e2ce4ec2787ad0b30545e17cde - daa833b7d6b8a702038b274eaea3f4e4 - be9d914eeb61f1702e696c203a126854" - ), - ); - - // Test Case 2 - check_test_vector( - &hex!("4a656665"), - &hex!("7768617420646f2079612077616e7420666f72206e6f7468696e673f"), - &hex!( - " - 5bdcc146bf60754e6a042426089575c7 - 5a003f089d2739839dec58b964ec3843" - ), - &hex!( - " - 164b7a7bfcf819e2e395fbe73b56e0a3 - 87bd64222e831fd610270cd7ea250554 - 9758bf75c05a994a6d034f65f8f0e6fd - 
caeab1a34d4a6b4b636e070a38bce737" - ), - ); - // Test Case 3 - check_test_vector( - &hex!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - &hex!("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"), - &hex!( - " - 773ea91e36800e46854db8ebd09181a7 - 2959098b3ef8c122d9635514ced565fe" - ), - &hex!( - " - fa73b0089d56a284efb0f0756c890be9 - b1b5dbdd8ee81a3655f83e33b2279d39 - bf3e848279a722c806b485a47e67c807 - b946a337bee8942674278859e13292fb" - ), - ); - - // Test Case 4 - check_test_vector( - &hex!("0102030405060708090a0b0c0d0e0f10111213141516171819"), - &hex!( - " - cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd - cdcd" - ), - &hex!( - " - 82558a389a443c0ea4cc819899f2083a - 85f0faa3e578f8077a2e3ff46729665b" - ), - &hex!( - " - b0ba465637458c6990e5a8c5f61d4af7 - e576d97ff94b872de76f8050361ee3db - a91ca5c11aa25eb4d679275cc5788063 - a5f19741120c4f2de2adebeb10a298dd" - ), - ); - - // Test Case 6 - check_test_vector( - &hex!( - " - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaa" - ), - &hex!( - " - 54657374205573696e67204c61726765 - 72205468616e20426c6f636b2d53697a - 65204b6579202d2048617368204b6579 - 204669727374" - ), - &hex!( - " - 60e431591ee0b67f0d8a26aacbf5b77f - 8e0bc6213728c5140546040f0ee37f54" - ), - &hex!( - " - 80b24263c7c1a3ebb71493c1dd7be8b4 - 9b46d1f41b4aeec1121b013783f8f352 - 6b56d037e05f2598bd0fd2215d6a1e52 - 95e64f73f63f0aec8b915a985d786598" - ), - ); - - // Test Case 7 - check_test_vector( - &hex!( - " - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa - aaaaaa" - ), - &hex!( - " - 54686973206973206120746573742075 - 73696e672061206c6172676572207468 - 616e20626c6f636b2d73697a65206b65 - 7920616e642061206c61726765722074 - 68616e20626c6f636b2d73697a652064 - 6174612e20546865206b6579206e6565 - 647320746f2062652068617368656420 - 6265666f7265206265696e6720757365 - 642062792074686520484d414320616c - 676f726974686d2e" - ), - &hex!( - " - 9b09ffa71b942fcb27635fbcd5b0e944 - bfdc63644f0713938a7f51535c3a35e2" - ), - &hex!( - " - e37b6a775dc87dbaa4dfa9f96e5e3ffd - debd71f8867289865df5a32d20cdc944 - b6022cac3c4982b10d5eeb55c3e4de15 - 134676fb6de0446065c97440fa8c6a58" - ), - ); -} diff --git a/parity-crypto/src/lib.rs b/parity-crypto/src/lib.rs deleted file mode 100644 index c3049716c..000000000 --- a/parity-crypto/src/lib.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Crypto utils used by ethstore and network. - -pub mod aes; -pub mod digest; -pub mod error; -pub mod hmac; -pub mod pbkdf2; -#[cfg(feature = "publickey")] -pub mod publickey; -pub mod scrypt; - -pub use crate::error::Error; - -use subtle::ConstantTimeEq; -use tiny_keccak::{Hasher, Keccak}; - -pub const KEY_LENGTH: usize = 32; -pub const KEY_ITERATIONS: usize = 10240; -pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; - -/// Default authenticated data to use (in RPC). 
-pub const DEFAULT_MAC: [u8; 2] = [0, 0]; - -pub trait Keccak256 { - fn keccak256(&self) -> T - where - T: Sized; -} - -impl Keccak256<[u8; 32]> for T -where - T: AsRef<[u8]>, -{ - fn keccak256(&self) -> [u8; 32] { - let mut keccak = Keccak::v256(); - let mut result = [0u8; 32]; - keccak.update(self.as_ref()); - keccak.finalize(&mut result); - result - } -} - -pub fn derive_key_iterations(password: &[u8], salt: &[u8], c: u32) -> (Vec, Vec) { - let mut derived_key = [0u8; KEY_LENGTH]; - pbkdf2::sha256(c, pbkdf2::Salt(salt), pbkdf2::Secret(password), &mut derived_key); - let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; - let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; - (derived_right_bits.to_vec(), derived_left_bits.to_vec()) -} - -pub fn derive_mac(derived_left_bits: &[u8], cipher_text: &[u8]) -> Vec { - let mut mac = vec![0u8; KEY_LENGTH_AES + cipher_text.len()]; - mac[0..KEY_LENGTH_AES].copy_from_slice(derived_left_bits); - mac[KEY_LENGTH_AES..cipher_text.len() + KEY_LENGTH_AES].copy_from_slice(cipher_text); - mac -} - -pub fn is_equal(a: &[u8], b: &[u8]) -> bool { - a.ct_eq(b).into() -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn can_test_for_equality() { - let a = b"abc"; - let b = b"abc"; - let c = b"efg"; - assert!(is_equal(a, b)); - assert!(!is_equal(a, c)); - } -} diff --git a/parity-crypto/src/pbkdf2/mod.rs b/parity-crypto/src/pbkdf2/mod.rs deleted file mode 100644 index c9f1bd565..000000000 --- a/parity-crypto/src/pbkdf2/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub struct Salt<'a>(pub &'a [u8]); -pub struct Secret<'a>(pub &'a [u8]); - -pub fn sha256(iter: u32, salt: Salt<'_>, sec: Secret<'_>, out: &mut [u8; 32]) { - pbkdf2::pbkdf2::>(sec.0, salt.0, iter, out) -} - -pub fn sha512(iter: u32, salt: Salt<'_>, sec: Secret<'_>, out: &mut [u8; 64]) { - pbkdf2::pbkdf2::>(sec.0, salt.0, iter, out) -} - -#[cfg(test)] -mod test; diff --git a/parity-crypto/src/pbkdf2/test.rs b/parity-crypto/src/pbkdf2/test.rs deleted file mode 100644 index b0bed84ad..000000000 --- a/parity-crypto/src/pbkdf2/test.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::*; - -#[test] -fn basic_test() { - let mut dest = [0; 32]; - let salt = [5; 32]; - let secret = [7; 32]; - sha256(3, Salt(&salt[..]), Secret(&secret[..]), &mut dest); - let res = [ - 242, 33, 31, 124, 36, 223, 179, 185, 206, 175, 190, 253, 85, 33, 23, 126, 141, 29, 23, 97, 66, 63, 51, 196, 27, - 255, 135, 206, 74, 137, 172, 87, - ]; - assert_eq!(res, dest); -} diff --git a/parity-crypto/src/publickey/ec_math_utils.rs b/parity-crypto/src/publickey/ec_math_utils.rs deleted file mode 100644 index af2e2bf96..000000000 --- a/parity-crypto/src/publickey/ec_math_utils.rs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
Multiple primitives for work with public and secret keys and with secp256k1 curve points - -use super::{Error, Public, Secret}; -use ethereum_types::{BigEndianHash as _, H256, U256}; -use lazy_static::lazy_static; -use secp256k1::constants::CURVE_ORDER as SECP256K1_CURVE_ORDER; -use secp256k1::key; -use secp256k1::SECP256K1; - -/// Generation point array combined from X and Y coordinates -/// Equivalent to uncompressed form, see https://tools.ietf.org/id/draft-jivsov-ecc-compact-05.html#rfc.section.3 -pub const BASE_POINT_BYTES: [u8; 65] = [ - 0x4, // The X coordinate of the generator - 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, 0x02, 0x9b, 0xfc, - 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, - // The Y coordinate of the generator - 0x48, 0x3a, 0xda, 0x77, 0x26, 0xa3, 0xc4, 0x65, 0x5d, 0xa4, 0xfb, 0xfc, 0x0e, 0x11, 0x08, 0xa8, 0xfd, 0x17, 0xb4, - 0x48, 0xa6, 0x85, 0x54, 0x19, 0x9c, 0x47, 0xd0, 0x8f, 0xfb, 0x10, 0xd4, 0xb8, -]; - -lazy_static! 
{ - pub static ref CURVE_ORDER: U256 = H256::from_slice(&SECP256K1_CURVE_ORDER).into_uint(); -} - -/// In-place multiply public key by secret key (EC point * scalar) -pub fn public_mul_secret(public: &mut Public, secret: &Secret) -> Result<(), Error> { - let key_secret = secret.to_secp256k1_secret()?; - let mut key_public = to_secp256k1_public(public)?; - key_public.mul_assign(&SECP256K1, &key_secret[..])?; - set_public(public, &key_public); - Ok(()) -} - -/// In-place add one public key to another (EC point + EC point) -pub fn public_add(public: &mut Public, other: &Public) -> Result<(), Error> { - let key_public = to_secp256k1_public(public)?; - let other_public = to_secp256k1_public(other)?; - let key_public = key_public.combine(&other_public)?; - set_public(public, &key_public); - Ok(()) -} - -/// In-place sub one public key from another (EC point - EC point) -pub fn public_sub(public: &mut Public, other: &Public) -> Result<(), Error> { - let mut key_neg_other = to_secp256k1_public(other)?; - key_neg_other.mul_assign(&SECP256K1, super::MINUS_ONE_KEY)?; - - let mut key_public = to_secp256k1_public(public)?; - key_public = key_public.combine(&key_neg_other)?; - set_public(public, &key_public); - Ok(()) -} - -/// Replace a public key with its additive inverse (EC point = - EC point) -pub fn public_negate(public: &mut Public) -> Result<(), Error> { - let mut key_public = to_secp256k1_public(public)?; - key_public.mul_assign(&SECP256K1, super::MINUS_ONE_KEY)?; - set_public(public, &key_public); - Ok(()) -} - -/// Return the generation point (aka base point) of secp256k1 -pub fn generation_point() -> Public { - let public_key = key::PublicKey::from_slice(&BASE_POINT_BYTES).expect("constructed using constants; qed"); - let mut public = Public::default(); - set_public(&mut public, &public_key); - public -} - -fn to_secp256k1_public(public: &Public) -> Result { - let public_data = { - let mut temp = [4u8; 65]; - (&mut temp[1..65]).copy_from_slice(&public[0..64]); - temp 
- }; - - Ok(key::PublicKey::from_slice(&public_data)?) -} - -fn set_public(public: &mut Public, key_public: &key::PublicKey) { - let key_public_serialized = key_public.serialize_uncompressed(); - public.as_bytes_mut().copy_from_slice(&key_public_serialized[1..65]); -} - -#[cfg(test)] -mod tests { - use super::super::{Generator, Random, Secret}; - use super::{generation_point, public_add, public_mul_secret, public_negate, public_sub}; - - #[test] - fn public_addition_is_commutative() { - let public1 = Random.generate().public().clone(); - let public2 = Random.generate().public().clone(); - - let mut left = public1.clone(); - public_add(&mut left, &public2).unwrap(); - - let mut right = public2.clone(); - public_add(&mut right, &public1).unwrap(); - - assert_eq!(left, right); - } - - #[test] - fn public_addition_is_reversible_with_subtraction() { - let public1 = Random.generate().public().clone(); - let public2 = Random.generate().public().clone(); - - let mut sum = public1.clone(); - public_add(&mut sum, &public2).unwrap(); - public_sub(&mut sum, &public2).unwrap(); - - assert_eq!(sum, public1); - } - - #[test] - fn public_negation_is_involutory() { - let public = Random.generate().public().clone(); - let mut negation = public.clone(); - public_negate(&mut negation).unwrap(); - public_negate(&mut negation).unwrap(); - - assert_eq!(negation, public); - } - - #[test] - fn generation_point_expected() { - let point = generation_point(); - // Check the returned value equal to uncompressed form for sec2561k1 - assert_eq!(format!("{:x}", point), "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"); - } - - #[test] - fn public_multiplication_verification() { - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let mut public = generation_point(); - public_mul_secret(&mut public, &secret).unwrap(); - assert_eq!(format!("{:x}", 
public), "8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4"); - } -} diff --git a/parity-crypto/src/publickey/ecdh.rs b/parity-crypto/src/publickey/ecdh.rs deleted file mode 100644 index a44eaabd3..000000000 --- a/parity-crypto/src/publickey/ecdh.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! ECDH key agreement scheme implemented as a free function. - -use super::{Error, Public, Secret}; -use secp256k1::{self, ecdh, key}; - -/// Agree on a shared secret -pub fn agree(secret: &Secret, public: &Public) -> Result { - let pdata = { - let mut temp = [4u8; 65]; - (&mut temp[1..65]).copy_from_slice(&public[0..64]); - temp - }; - - let publ = key::PublicKey::from_slice(&pdata)?; - let sec = key::SecretKey::from_slice(secret.as_bytes())?; - let shared = ecdh::SharedSecret::new_with_hash(&publ, &sec, |x, _| x.into()); - - Secret::import_key(&shared[0..32]).map_err(|_| Error::Secp(secp256k1::Error::InvalidSecretKey)) -} - -#[cfg(test)] -mod tests { - use super::{agree, Public, Secret}; - use std::str::FromStr; - - #[test] - fn test_agree() { - // Just some random values for secret/public to check we agree with previous implementation. 
- let secret = - Secret::copy_from_str(&"01a400760945613ff6a46383b250bf27493bfe679f05274916182776f09b28f1").unwrap(); - let public= Public::from_str("e37f3cbb0d0601dc930b8d8aa56910dd5629f2a0979cc742418960573efc5c0ff96bc87f104337d8c6ab37e597d4f9ffbd57302bc98a825519f691b378ce13f5").unwrap(); - let shared = agree(&secret, &public); - - assert!(shared.is_ok()); - assert_eq!(shared.unwrap().to_hex(), "28ab6fad6afd854ff27162e0006c3f6bd2daafc0816c85b5dfb05dbb865fa6ac",); - } -} diff --git a/parity-crypto/src/publickey/ecdsa_signature.rs b/parity-crypto/src/publickey/ecdsa_signature.rs deleted file mode 100644 index b7d924c6c..000000000 --- a/parity-crypto/src/publickey/ecdsa_signature.rs +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm - -use super::{public_to_address, Address, Error, Message, Public, Secret}; -use ethereum_types::{H256, H520}; -use rustc_hex::{FromHex, ToHex}; -use secp256k1::{ - key::{PublicKey, SecretKey}, - recovery::{RecoverableSignature, RecoveryId}, - Error as SecpError, Message as SecpMessage, SECP256K1, -}; -use std::{ - cmp::PartialEq, - fmt, - hash::{Hash, Hasher}, - ops::{Deref, DerefMut}, - str::FromStr, -}; - -/// Signature encoded as RSV components -#[repr(C)] -pub struct Signature([u8; 65]); - -impl Signature { - /// Get a slice into the 'r' portion of the data. - pub fn r(&self) -> &[u8] { - &self.0[0..32] - } - - /// Get a slice into the 's' portion of the data. - pub fn s(&self) -> &[u8] { - &self.0[32..64] - } - - /// Get the recovery byte. - pub fn v(&self) -> u8 { - self.0[64] - } - - /// Encode the signature into RSV array (V altered to be in "Electrum" notation). 
- pub fn into_electrum(mut self) -> [u8; 65] { - self.0[64] += 27; - self.0 - } - - /// Parse bytes as a signature encoded as RSV (V in "Electrum" notation). - /// May return empty (invalid) signature if given data has invalid length. - pub fn from_electrum(data: &[u8]) -> Self { - if data.len() != 65 || data[64] < 27 { - // fallback to empty (invalid) signature - return Signature::default(); - } - - let mut sig = [0u8; 65]; - sig.copy_from_slice(data); - sig[64] -= 27; - Signature(sig) - } - - /// Create a signature object from the RSV triple. - pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self { - let mut sig = [0u8; 65]; - sig[0..32].copy_from_slice(r.as_ref()); - sig[32..64].copy_from_slice(s.as_ref()); - sig[64] = v; - Signature(sig) - } - - /// Check if this is a "low" signature (that s part of the signature is in range - /// 0x1 and 0x7FFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF 5D576E73 57A4501D DFE92F46 681B20A0 (inclusive)). - /// This condition may be required by some verification algorithms - pub fn is_low_s(&self) -> bool { - const LOW_SIG_THRESHOLD: H256 = H256([ - 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x5D, 0x57, - 0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0, - ]); - H256::from_slice(self.s()) <= LOW_SIG_THRESHOLD - } - - /// Check if each component of the signature is in valid range. 
- /// r is in range 0x1 and 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 (inclusive) - /// s is in range 0x1 and fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 (inclusive) - /// v is 0 or 1 - /// Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1; - /// used here as the upper bound for a valid (r, s, v) tuple - pub fn is_valid(&self) -> bool { - const UPPER_BOUND: H256 = H256([ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, - 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41, - ]); - const ONE: H256 = H256([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - ]); - let r = H256::from_slice(self.r()); - let s = H256::from_slice(self.s()); - self.v() <= 1 && r < UPPER_BOUND && r >= ONE && s < UPPER_BOUND && s >= ONE - } -} - -// manual implementation large arrays don't have trait impls by default. -// TODO[grbIzl] remove when integer generics exist -impl PartialEq for Signature { - fn eq(&self, other: &Self) -> bool { - &self.0[..] == &other.0[..] - } -} - -// manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`. -impl Eq for Signature {} - -// also manual for the same reason, but the pretty printing might be useful. 
-impl fmt::Debug for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("Signature") - .field("r", &self.0[0..32].to_hex::()) - .field("s", &self.0[32..64].to_hex::()) - .field("v", &self.0[64..65].to_hex::()) - .finish() - } -} - -impl fmt::Display for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(f, "{}", self.to_hex::()) - } -} - -impl FromStr for Signature { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s.from_hex::>() { - Ok(ref hex) if hex.len() == 65 => { - let mut data = [0; 65]; - data.copy_from_slice(&hex[0..65]); - Ok(Signature(data)) - } - _ => Err(Error::InvalidSignature), - } - } -} - -impl Default for Signature { - fn default() -> Self { - Signature([0; 65]) - } -} - -impl Hash for Signature { - fn hash(&self, state: &mut H) { - H520::from(self.0).hash(state); - } -} - -impl Clone for Signature { - fn clone(&self) -> Self { - Signature(self.0.clone()) - } -} - -impl From<[u8; 65]> for Signature { - fn from(s: [u8; 65]) -> Self { - Signature(s) - } -} - -impl Into<[u8; 65]> for Signature { - fn into(self) -> [u8; 65] { - self.0 - } -} - -impl From for H520 { - fn from(s: Signature) -> Self { - H520::from(s.0) - } -} - -impl From for Signature { - fn from(bytes: H520) -> Self { - Signature(bytes.into()) - } -} - -impl Deref for Signature { - type Target = [u8; 65]; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Signature { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -/// Signs message with the given secret key. -/// Returns the corresponding signature. 
-pub fn sign(secret: &Secret, message: &Message) -> Result { - let context = &SECP256K1; - let sec = SecretKey::from_slice(secret.as_ref())?; - let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec); - let (rec_id, data) = s.serialize_compact(); - let mut data_arr = [0; 65]; - - // no need to check if s is low, it always is - data_arr[0..64].copy_from_slice(&data[0..64]); - data_arr[64] = rec_id.to_i32() as u8; - Ok(Signature(data_arr)) -} - -/// Performs verification of the signature for the given message with corresponding public key -pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result { - let context = &SECP256K1; - let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let sig = rsig.to_standard(); - - let pdata: [u8; 65] = { - let mut temp = [4u8; 65]; - temp[1..65].copy_from_slice(public.as_bytes()); - temp - }; - - let publ = PublicKey::from_slice(&pdata)?; - match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) { - Ok(_) => Ok(true), - Err(SecpError::IncorrectSignature) => Ok(false), - Err(x) => Err(Error::from(x)), - } -} - -/// Checks if the address corresponds to the public key from the signature for the message -pub fn verify_address(address: &Address, signature: &Signature, message: &Message) -> Result { - let public = recover(signature, message)?; - let recovered_address = public_to_address(&public); - Ok(address == &recovered_address) -} - -/// Recovers the public key from the signature for the message -pub fn recover(signature: &Signature, message: &Message) -> Result { - let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?; - let pubkey = &SECP256K1.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?; - let serialized = pubkey.serialize_uncompressed(); - let mut public = Public::default(); - 
public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - Ok(public) -} - -#[cfg(test)] -mod tests { - use super::{ - super::{Generator, Message, Random}, - recover, sign, verify_address, verify_public, Signature, - }; - use std::str::FromStr; - - #[test] - fn vrs_conversion() { - // given - let keypair = Random.generate(); - let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); - - // when - let vrs = signature.clone().into_electrum(); - let from_vrs = Signature::from_electrum(&vrs); - - // then - assert_eq!(signature, from_vrs); - } - - #[test] - fn signature_to_and_from_str() { - let keypair = Random.generate(); - let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); - let string = format!("{}", signature); - let deserialized = Signature::from_str(&string).unwrap(); - assert_eq!(signature, deserialized); - } - - #[test] - fn sign_and_recover_public() { - let keypair = Random.generate(); - let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - let signature = sign(keypair.secret(), &message).unwrap(); - assert_eq!(keypair.public(), &recover(&signature, &message).unwrap()); - } - - #[test] - fn sign_and_recover_public_works_with_zeroed_messages() { - let keypair = Random.generate(); - let signature = sign(keypair.secret(), &Message::zero()).unwrap(); - let zero_message = Message::zero(); - assert_eq!(keypair.public(), &recover(&signature, &zero_message).unwrap()); - } - - #[test] - fn recover_allowing_all_zero_message_can_recover_from_all_zero_messages() { - let keypair = Random.generate(); - let signature = sign(keypair.secret(), &Message::zero()).unwrap(); - let zero_message = Message::zero(); - 
assert_eq!(keypair.public(), &recover(&signature, &zero_message).unwrap()) - } - - #[test] - fn sign_and_verify_public() { - let keypair = Random.generate(); - let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); - assert!(verify_public(keypair.public(), &signature, &message).unwrap()); - } - - #[test] - fn sign_and_verify_address() { - let keypair = Random.generate(); - let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); - let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message"); - assert!(verify_address(&keypair.address(), &signature, &message).unwrap()); - } -} diff --git a/parity-crypto/src/publickey/ecies.rs b/parity-crypto/src/publickey/ecies.rs deleted file mode 100644 index 3332f8a94..000000000 --- a/parity-crypto/src/publickey/ecies.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Functions for ECIES scheme encryption and decryption - -use super::{ecdh, Error, Generator, Public, Random, Secret}; -use crate::{aes, digest, hmac, is_equal}; -use ethereum_types::H128; - -const ENC_VERSION: u8 = 0x04; - -/// Encrypt a message with a public key, writing an HMAC covering both -/// the plaintext and authenticated data. -/// -/// Authenticated data may be empty. 
-pub fn encrypt(public: &Public, auth_data: &[u8], plain: &[u8]) -> Result, Error> { - let r = Random.generate(); - let z = ecdh::agree(r.secret(), public)?; - let mut key = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); - - let ekey = &key[0..16]; - let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); - - let mut msg = vec![0u8; 1 + 64 + 16 + plain.len() + 32]; - msg[0] = ENC_VERSION; - { - let result_msg = &mut msg[1..]; - result_msg[0..64].copy_from_slice(r.public().as_bytes()); - let iv = H128::random(); - result_msg[64..80].copy_from_slice(iv.as_bytes()); - { - let cipher = &mut result_msg[(64 + 16)..(64 + 16 + plain.len())]; - aes::encrypt_128_ctr(ekey, iv.as_bytes(), plain, cipher)?; - } - let mut hmac = hmac::Signer::with(&mkey); - { - let cipher_iv = &result_msg[64..(64 + 16 + plain.len())]; - hmac.update(cipher_iv); - } - hmac.update(auth_data); - let sig = hmac.sign(); - result_msg[(64 + 16 + plain.len())..].copy_from_slice(&sig); - } - Ok(msg) -} - -/// Decrypt a message with a secret key, checking HMAC for ciphertext -/// and authenticated data validity. 
-pub fn decrypt(secret: &Secret, auth_data: &[u8], encrypted: &[u8]) -> Result, Error> { - const META_LEN: usize = 1 + 64 + 16 + 32; - let enc_version = encrypted[0]; - if encrypted.len() < META_LEN || enc_version < 2 || enc_version > 4 { - return Err(Error::InvalidMessage); - } - - let e = &encrypted[1..]; - let p = Public::from_slice(&e[0..64]); - let z = ecdh::agree(secret, &p)?; - let mut key = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); - - let ekey = &key[0..16]; - let mkey = hmac::SigKey::sha256(&digest::sha256(&key[16..32])); - - let cipher_text_len = encrypted.len() - META_LEN; - let cipher_with_iv = &e[64..(64 + 16 + cipher_text_len)]; - let cipher_iv = &cipher_with_iv[0..16]; - let cipher_no_iv = &cipher_with_iv[16..]; - let msg_mac = &e[(64 + 16 + cipher_text_len)..]; - - // Verify tag - let mut hmac = hmac::Signer::with(&mkey); - hmac.update(cipher_with_iv); - hmac.update(auth_data); - let mac = hmac.sign(); - - if !is_equal(&mac.as_ref()[..], msg_mac) { - return Err(Error::InvalidMessage); - } - - let mut msg = vec![0u8; cipher_text_len]; - aes::decrypt_128_ctr(ekey, cipher_iv, cipher_no_iv, &mut msg[..])?; - Ok(msg) -} - -fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { - // SEC/ISO/Shoup specify counter size SHOULD be equivalent - // to size of hash output, however, it also notes that - // the 4 bytes is okay. NIST specifies 4 bytes. 
- let mut ctr = 1u32; - let mut written = 0usize; - while written < dest.len() { - let mut hasher = digest::Hasher::sha256(); - let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8]; - hasher.update(&ctrs); - hasher.update(secret.as_bytes()); - hasher.update(s1); - let d = hasher.finish(); - &mut dest[written..(written + 32)].copy_from_slice(&d); - written += 32; - ctr += 1; - } -} - -#[cfg(test)] -mod tests { - use super::super::{ecies, Generator, Random}; - - #[test] - fn ecies_shared() { - let kp = Random.generate(); - let message = b"So many books, so little time"; - - let shared = b"shared"; - let wrong_shared = b"incorrect"; - let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap(); - assert!(encrypted[..] != message[..]); - assert_eq!(encrypted[0], 0x04); - - assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err()); - let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); - assert_eq!(decrypted[..message.len()], message[..]); - } -} diff --git a/parity-crypto/src/publickey/error.rs b/parity-crypto/src/publickey/error.rs deleted file mode 100644 index 5ea8ce391..000000000 --- a/parity-crypto/src/publickey/error.rs +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Module specific errors. 
- -use crate::error::SymmError; -use std::{error::Error as StdError, fmt, result}; - -/// Module specific errors -#[derive(Debug)] -pub enum Error { - /// secp256k1 enc error - Secp(secp256k1::Error), - /// Invalid secret key - InvalidSecretKey, - /// Invalid public key - InvalidPublicKey, - /// Invalid address - InvalidAddress, - /// Invalid EC signature - InvalidSignature, - /// Invalid AES message - InvalidMessage, - /// IO Error - Io(std::io::Error), - /// Symmetric encryption error - Symm(SymmError), - /// Custom - Custom(String), -} - -impl StdError for Error { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - match self { - Error::Secp(secp_err) => Some(secp_err), - Error::Io(err) => Some(err), - Error::Symm(symm_err) => Some(symm_err), - _ => None, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - match self { - Error::Secp(err) => write!(f, "secp error: {}", err), - Error::InvalidSecretKey => write!(f, "invalid secret key"), - Error::InvalidPublicKey => write!(f, "invalid public key"), - Error::InvalidAddress => write!(f, "invalid address"), - Error::InvalidSignature => write!(f, "invalid EC signature"), - Error::InvalidMessage => write!(f, "invalid AES message"), - Error::Io(err) => write!(f, "I/O error: {}", err), - Error::Symm(err) => write!(f, "symmetric encryption error: {}", err), - Error::Custom(err) => write!(f, "custom crypto error: {}", err), - } - } -} - -impl Into for Error { - fn into(self) -> String { - format!("{}", self) - } -} - -impl From for Error { - fn from(err: std::io::Error) -> Error { - Error::Io(err) - } -} - -impl From for Error { - fn from(err: SymmError) -> Error { - Error::Symm(err) - } -} - -impl From for Error { - fn from(e: secp256k1::Error) -> Error { - match e { - secp256k1::Error::InvalidMessage => Error::InvalidMessage, - secp256k1::Error::InvalidPublicKey => Error::InvalidPublicKey, - secp256k1::Error::InvalidSecretKey => 
Error::InvalidSecretKey, - _ => Error::InvalidSignature, - } - } -} diff --git a/parity-crypto/src/publickey/extended_keys.rs b/parity-crypto/src/publickey/extended_keys.rs deleted file mode 100644 index adc4f3862..000000000 --- a/parity-crypto/src/publickey/extended_keys.rs +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Secret, public keys extended with the entropy (aka chain code), that allows further key derivation -//! Each extended key has 2^31 normal child keys, and 2^31 hardened child keys. -//! Each of these child keys has an index. The normal child keys use indices 0 through 2^31 - 1. -//! The hardened child keys use indices 2^31 through 2^32 - 1. -//! See more details about derivation in https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki - -pub use self::derivation::Error as DerivationError; -use super::{Public, Secret}; -use ethereum_types::H256; -use zeroize::Zeroize; - -/// Represents label that can be stored as a part of key derivation -pub trait Label { - /// Length of the data that label occupies - fn len() -> usize; - - /// Store label data to the key derivation sequence - /// Must not use more than `len()` bytes from slice - fn store(&self, target: &mut [u8]); -} - -impl Label for u32 { - fn len() -> usize { - 4 - } - - fn store(&self, target: &mut [u8]) { - let bytes = self.to_be_bytes(); - target[0..4].copy_from_slice(&bytes); - } -} - -/// Key derivation over generic label `T` -pub enum Derivation { - /// Soft key derivation (allow proof of parent) - Soft(T), - /// Hard key derivation (does not allow proof of parent) - Hard(T), -} - -impl From for Derivation { - fn from(index: u32) -> Self { - // Type of the derived key is defined by it index - // See module's documentation for more details - if index < (2 << 30) { 
- Derivation::Soft(index) - } else { - Derivation::Hard(index) - } - } -} - -impl Label for H256 { - fn len() -> usize { - 32 - } - - fn store(&self, target: &mut [u8]) { - (&mut target[0..32]).copy_from_slice(self.as_bytes()); - } -} - -/// Extended secret key, allows deterministic derivation of subsequent keys. -pub struct ExtendedSecret { - secret: Secret, - chain_code: H256, -} - -impl ExtendedSecret { - /// New extended key from given secret and chain code. - pub fn with_code(secret: Secret, chain_code: H256) -> ExtendedSecret { - ExtendedSecret { secret, chain_code } - } - - /// New extended key from given secret with the random chain code. - pub fn new_random(secret: Secret) -> ExtendedSecret { - ExtendedSecret::with_code(secret, H256::random()) - } - - /// New extended key from given secret. - /// Chain code will be derived from the secret itself (deterministically). - pub fn new(secret: Secret) -> ExtendedSecret { - let chain_code = derivation::chain_code(*secret); - ExtendedSecret::with_code(secret, chain_code) - } - - /// Derive new private key - pub fn derive(&self, index: Derivation) -> ExtendedSecret - where - T: Label, - { - let (mut derived_key, next_chain_code) = derivation::private(*self.secret, self.chain_code, index); - - let new_derived_secret = Secret::from(derived_key.0); - - derived_key.0.zeroize(); - - ExtendedSecret::with_code(new_derived_secret, next_chain_code) - } - - /// Private key component of the extended key. - pub fn as_raw(&self) -> &Secret { - &self.secret - } -} - -/// Extended public key, allows deterministic derivation of subsequent keys. 
-pub struct ExtendedPublic { - public: Public, - chain_code: H256, -} - -impl ExtendedPublic { - /// New extended public key from known parent and chain code - pub fn new(public: Public, chain_code: H256) -> Self { - ExtendedPublic { public: public, chain_code: chain_code } - } - - /// Create new extended public key from known secret - pub fn from_secret(secret: &ExtendedSecret) -> Result { - Ok(ExtendedPublic::new(derivation::point(**secret.as_raw())?, secret.chain_code.clone())) - } - - /// Derive new public key - /// Operation is defined only for index belongs [0..2^31) - pub fn derive(&self, index: Derivation) -> Result - where - T: Label, - { - let (derived_key, next_chain_code) = derivation::public(self.public, self.chain_code, index)?; - Ok(ExtendedPublic::new(derived_key, next_chain_code)) - } - - pub fn public(&self) -> &Public { - &self.public - } -} - -pub struct ExtendedKeyPair { - secret: ExtendedSecret, - public: ExtendedPublic, -} - -impl ExtendedKeyPair { - pub fn new(secret: Secret) -> Self { - let extended_secret = ExtendedSecret::new(secret); - let extended_public = - ExtendedPublic::from_secret(&extended_secret).expect("Valid `Secret` always produces valid public; qed"); - ExtendedKeyPair { secret: extended_secret, public: extended_public } - } - - pub fn with_code(secret: Secret, public: Public, chain_code: H256) -> Self { - ExtendedKeyPair { - secret: ExtendedSecret::with_code(secret, chain_code.clone()), - public: ExtendedPublic::new(public, chain_code), - } - } - - pub fn with_secret(secret: Secret, chain_code: H256) -> Self { - let extended_secret = ExtendedSecret::with_code(secret, chain_code); - let extended_public = - ExtendedPublic::from_secret(&extended_secret).expect("Valid `Secret` always produces valid public; qed"); - ExtendedKeyPair { secret: extended_secret, public: extended_public } - } - - pub fn with_seed(seed: &[u8]) -> Result { - let (master_key, chain_code) = derivation::seed_pair(seed); - Ok(ExtendedKeyPair::with_secret( - 
Secret::import_key(master_key.as_bytes()).map_err(|_| DerivationError::InvalidSeed)?, - chain_code, - )) - } - - pub fn secret(&self) -> &ExtendedSecret { - &self.secret - } - - pub fn public(&self) -> &ExtendedPublic { - &self.public - } - - pub fn derive(&self, index: Derivation) -> Result - where - T: Label, - { - let derived = self.secret.derive(index); - - Ok(ExtendedKeyPair { public: ExtendedPublic::from_secret(&derived)?, secret: derived }) - } -} - -// Derivation functions for private and public keys -// Work is based on BIP0032 -// https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki -mod derivation { - use super::super::ec_math_utils::CURVE_ORDER; - use super::{Derivation, Label}; - use crate::{hmac, Keccak256}; - use ethereum_types::{BigEndianHash, H256, H512, U256, U512}; - use secp256k1::{ - key::{PublicKey, SecretKey}, - SECP256K1, - }; - use std::convert::TryInto; - - #[derive(Debug)] - pub enum Error { - InvalidHardenedUse, - InvalidPoint, - MissingIndex, - InvalidSeed, - } - - // Deterministic derivation of the key using secp256k1 elliptic curve. - // Derivation can be either hardened or not. 
- // For hardened derivation, pass u32 index at least 2^31 or custom Derivation::Hard(T) enum - // - // Can panic if passed `private_key` is not a valid secp256k1 private key - // (outside of (0..curve_order()]) field - pub fn private(private_key: H256, chain_code: H256, index: Derivation) -> (H256, H256) - where - T: Label, - { - match index { - Derivation::Soft(index) => private_soft(private_key, chain_code, index), - Derivation::Hard(index) => private_hard(private_key, chain_code, index), - } - } - - fn hmac_pair(data: &[u8], private_key: H256, chain_code: H256) -> (H256, H256) { - let private: U256 = private_key.into_uint(); - - // produces 512-bit derived hmac (I) - let skey = hmac::SigKey::sha512(chain_code.as_bytes()); - let i_512 = hmac::sign(&skey, &data[..]); - - // left most 256 bits are later added to original private key - let hmac_key: U256 = H256::from_slice(&i_512[0..32]).into_uint(); - // right most 256 bits are new chain code for later derivations - let next_chain_code = H256::from_slice(&i_512[32..64]); - - let child_key = BigEndianHash::from_uint(&private_add(hmac_key, private)); - (child_key, next_chain_code) - } - - // Can panic if passed `private_key` is not a valid secp256k1 private key - // (outside of (0..curve_order()]) field - fn private_soft(private_key: H256, chain_code: H256, index: T) -> (H256, H256) - where - T: Label, - { - let mut data = vec![0u8; 33 + T::len()]; - - let sec_private = - SecretKey::from_slice(private_key.as_bytes()).expect("Caller should provide valid private key"); - let sec_public = PublicKey::from_secret_key(&SECP256K1, &sec_private); - let public_serialized = sec_public.serialize(); - - // curve point (compressed public key) -- index - // 0.33 -- 33..end - data[0..33].copy_from_slice(&public_serialized); - index.store(&mut data[33..]); - - hmac_pair(&data, private_key, chain_code) - } - - // Deterministic derivation of the key using secp256k1 elliptic curve - // This is hardened derivation and does not allow to 
associate - // corresponding public keys of the original and derived private keys - fn private_hard(private_key: H256, chain_code: H256, index: T) -> (H256, H256) - where - T: Label, - { - let mut data: Vec = vec![0u8; 33 + T::len()]; - let private: U256 = private_key.into_uint(); - - // 0x00 (padding) -- private_key -- index - // 0 -- 1..33 -- 33..end - private.to_big_endian(&mut data[1..33]); - index.store(&mut data[33..(33 + T::len())]); - - hmac_pair(&data, private_key, chain_code) - } - - fn private_add(k1: U256, k2: U256) -> U256 { - let sum = U512::from(k1) + U512::from(k2); - modulo(sum, *CURVE_ORDER) - } - - // todo: surely can be optimized - fn modulo(u1: U512, u2: U256) -> U256 { - let m = u1 % U512::from(u2); - m.try_into().expect("U512 modulo U256 should fit into U256; qed") - } - - pub fn public(public_key: H512, chain_code: H256, derivation: Derivation) -> Result<(H512, H256), Error> - where - T: Label, - { - let index = match derivation { - Derivation::Soft(index) => index, - Derivation::Hard(_) => { - return Err(Error::InvalidHardenedUse); - } - }; - - let mut public_sec_raw = [0u8; 65]; - public_sec_raw[0] = 4; - public_sec_raw[1..65].copy_from_slice(public_key.as_bytes()); - let public_sec = PublicKey::from_slice(&public_sec_raw).map_err(|_| Error::InvalidPoint)?; - let public_serialized = public_sec.serialize(); - - let mut data = vec![0u8; 33 + T::len()]; - // curve point (compressed public key) -- index - // 0.33 -- 33..end - data[0..33].copy_from_slice(&public_serialized); - index.store(&mut data[33..(33 + T::len())]); - - // HMAC512SHA produces [derived private(256); new chain code(256)] - let skey = hmac::SigKey::sha512(chain_code.as_bytes()); - let i_512 = hmac::sign(&skey, &data[..]); - - let new_private = H256::from_slice(&i_512[0..32]); - let new_chain_code = H256::from_slice(&i_512[32..64]); - - // Generated private key can (extremely rarely) be out of secp256k1 key field - if *CURVE_ORDER <= new_private.into_uint() { - return 
Err(Error::MissingIndex); - } - let new_private_sec = SecretKey::from_slice(new_private.as_bytes()).expect( - "Private key belongs to the field [0..CURVE_ORDER) (checked above); So initializing can never fail; qed", - ); - let mut new_public = PublicKey::from_secret_key(&SECP256K1, &new_private_sec); - - // Adding two points on the elliptic curves (combining two public keys) - new_public = new_public.combine(&public_sec).expect("Addition of two valid points produce valid point"); - - let serialized = new_public.serialize_uncompressed(); - - Ok((H512::from_slice(&serialized[1..65]), new_chain_code)) - } - - fn sha3(slc: &[u8]) -> H256 { - slc.keccak256().into() - } - - pub fn chain_code(secret: H256) -> H256 { - // 10,000 rounds of sha3 - let mut running_sha3 = sha3(secret.as_bytes()); - for _ in 0..99999 { - running_sha3 = sha3(running_sha3.as_bytes()); - } - running_sha3 - } - - pub fn point(secret: H256) -> Result { - let sec = SecretKey::from_slice(secret.as_bytes()).map_err(|_| Error::InvalidPoint)?; - let public_sec = PublicKey::from_secret_key(&SECP256K1, &sec); - let serialized = public_sec.serialize_uncompressed(); - Ok(H512::from_slice(&serialized[1..65])) - } - - pub fn seed_pair(seed: &[u8]) -> (H256, H256) { - let skey = hmac::SigKey::sha512(b"Bitcoin seed"); - let i_512 = hmac::sign(&skey, seed); - - let master_key = H256::from_slice(&i_512[0..32]); - let chain_code = H256::from_slice(&i_512[32..64]); - - (master_key, chain_code) - } -} - -#[cfg(test)] -mod tests { - use super::super::Secret; - use super::{derivation, Derivation}; - use super::{ExtendedKeyPair, ExtendedPublic, ExtendedSecret}; - use ethereum_types::{H128, H256, H512}; - use std::str::FromStr; - - fn master_chain_basic() -> (H256, H256) { - let seed = - H128::from_str("000102030405060708090a0b0c0d0e0f").expect("Seed should be valid H128").as_bytes().to_vec(); - - derivation::seed_pair(&*seed) - } - - fn test_extended(f: F, test_private: H256) - where - F: Fn(ExtendedSecret) -> 
ExtendedSecret, - { - let (private_seed, chain_code) = master_chain_basic(); - let extended_secret = ExtendedSecret::with_code(Secret::from(private_seed.0), chain_code); - let derived = f(extended_secret); - assert_eq!(**derived.as_raw(), test_private); - } - - #[test] - fn smoky() { - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::zero()); - - // hardened - assert_eq!(&**extended_secret.as_raw(), &*secret); - assert_eq!( - **extended_secret.derive(2147483648.into()).as_raw(), - H256::from_str("0927453daed47839608e414a3738dfad10aed17c459bbd9ab53f89b026c834b6").unwrap(), - ); - assert_eq!( - **extended_secret.derive(2147483649.into()).as_raw(), - H256::from_str("44238b6a29c6dcbe9b401364141ba11e2198c289a5fed243a1c11af35c19dc0f").unwrap(), - ); - - // normal - assert_eq!( - **extended_secret.derive(0.into()).as_raw(), - H256::from_str("bf6a74e3f7b36fc4c96a1e12f31abc817f9f5904f5a8fc27713163d1f0b713f6").unwrap() - ); - assert_eq!( - **extended_secret.derive(1.into()).as_raw(), - H256::from_str("bd4fca9eb1f9c201e9448c1eecd66e302d68d4d313ce895b8c134f512205c1bc").unwrap() - ); - assert_eq!( - **extended_secret.derive(2.into()).as_raw(), - H256::from_str("86932b542d6cab4d9c65490c7ef502d89ecc0e2a5f4852157649e3251e2a3268").unwrap() - ); - - let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); - let derived_public = extended_public.derive(0.into()).expect("First derivation of public should succeed"); - assert_eq!( - *derived_public.public(), - H512::from_str("f7b3244c96688f92372bfd4def26dc4151529747bab9f188a4ad34e141d47bd66522ff048bc6f19a0a4429b04318b1a8796c000265b4fa200dae5f6dda92dd94").unwrap(), - ); - - let keypair = ExtendedKeyPair::with_secret( - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(), - H256::from_low_u64_be(64), 
- ); - assert_eq!( - **keypair.derive(2147483648u32.into()).expect("Derivation of keypair should succeed").secret().as_raw(), - H256::from_str("edef54414c03196557cf73774bc97a645c9a1df2164ed34f0c2a78d1375a930c").unwrap(), - ); - } - - #[test] - fn h256_soft_match() { - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let derivation_secret = - H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); - - let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::zero()); - let extended_public = ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); - - let derived_secret0 = extended_secret.derive(Derivation::Soft(derivation_secret)); - let derived_public0 = extended_public - .derive(Derivation::Soft(derivation_secret)) - .expect("First derivation of public should succeed"); - - let public_from_secret0 = - ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); - - assert_eq!(public_from_secret0.public(), derived_public0.public()); - } - - #[test] - fn h256_hard() { - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let derivation_secret = - H256::from_str("51eaf04f9dbbc1417dc97e789edd0c37ecda88bac490434e367ea81b71b7b015").unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); - - assert_eq!( - **extended_secret.derive(Derivation::Hard(derivation_secret)).as_raw(), - H256::from_str("2bc2d696fb744d77ff813b4a1ef0ad64e1e5188b622c54ba917acc5ebc7c5486").unwrap(), - ); - } - - #[test] - fn test_key_derivation() { - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let extended_secret = ExtendedSecret::with_code(secret.clone(), H256::from_low_u64_be(1)); - let extended_public = 
ExtendedPublic::from_secret(&extended_secret).expect("Extended public should be created"); - - let derived_secret0 = extended_secret.derive(0.into()); - let derived_public0 = extended_public.derive(0.into()).expect("First derivation of public should succeed"); - - let public_from_secret0 = - ExtendedPublic::from_secret(&derived_secret0).expect("Extended public should be created"); - - assert_eq!(public_from_secret0.public(), derived_public0.public()); - } - - #[test] - fn test_seeds() { - let seed = - H128::from_str("000102030405060708090a0b0c0d0e0f").expect("Seed should be valid H128").as_bytes().to_vec(); - - // private key from bitcoin test vector - // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs - let test_private = H256::from_str("e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35") - .expect("Private should be decoded ok"); - - let (private_seed, _) = derivation::seed_pair(&*seed); - - assert_eq!(private_seed, test_private); - } - - #[test] - fn test_vector_1() { - // xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7 - // H(0) - test_extended( - |secret| secret.derive(2147483648.into()), - H256::from_str("edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea") - .expect("Private should be decoded ok"), - ); - } - - #[test] - fn test_vector_2() { - // xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs - // H(0)/1 - test_extended( - |secret| secret.derive(2147483648.into()).derive(1.into()), - H256::from_str("3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368") - .expect("Private should be decoded ok"), - ); - } -} diff --git a/parity-crypto/src/publickey/keypair.rs b/parity-crypto/src/publickey/keypair.rs deleted file mode 100644 index dbbf637dc..000000000 --- a/parity-crypto/src/publickey/keypair.rs +++ /dev/null @@ -1,109 +0,0 @@ -// 
Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Key pair (public + secret) description. - -use super::{Address, Error, Public, Secret}; -use crate::Keccak256; -use secp256k1::{key, SECP256K1}; -use std::fmt; - -/// Convert public key into the address -pub fn public_to_address(public: &Public) -> Address { - let hash = public.keccak256(); - let mut result = Address::zero(); - result.as_bytes_mut().copy_from_slice(&hash[12..]); - result -} - -#[derive(Debug, Clone, PartialEq)] -/// secp256k1 key pair -pub struct KeyPair { - secret: Secret, - public: Public, -} - -impl fmt::Display for KeyPair { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - writeln!(f, "secret: {:x}", self.secret)?; - writeln!(f, "public: {:x}", self.public)?; - write!(f, "address: {:x}", self.address()) - } -} - -impl KeyPair { - /// Create a pair from secret key - pub fn from_secret(secret: Secret) -> Result { - let context = &SECP256K1; - let s: key::SecretKey = key::SecretKey::from_slice(&secret[..])?; - let pub_key = key::PublicKey::from_secret_key(context, &s); - let serialized = pub_key.serialize_uncompressed(); - - let mut public = Public::default(); - public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - - let keypair = KeyPair { secret, public }; - - Ok(keypair) - } - - /// Create a pair from the slice, which imported and verified as secret key - pub fn from_secret_slice(slice: &[u8]) -> Result { - Self::from_secret(Secret::import_key(slice)?) 
- } - - /// Copies a pair from another one - #[inline(always)] - pub fn from_keypair(sec: key::SecretKey, publ: key::PublicKey) -> Self { - let serialized = publ.serialize_uncompressed(); - let secret = Secret::from(sec); - let mut public = Public::default(); - public.as_bytes_mut().copy_from_slice(&serialized[1..65]); - - KeyPair { secret, public } - } - - /// Returns secret part of the keypair - pub fn secret(&self) -> &Secret { - &self.secret - } - - /// Returns public part of the keypair - pub fn public(&self) -> &Public { - &self.public - } - - /// Returns public part of the keypair converted into Address - pub fn address(&self) -> Address { - public_to_address(&self.public) - } -} - -#[cfg(test)] -mod tests { - use super::{KeyPair, Secret}; - - #[test] - fn from_secret() { - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let _ = KeyPair::from_secret(secret).unwrap(); - } - - #[test] - fn keypair_display() { - let expected = -"secret: a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65 -public: 8ce0db0b0359ffc5866ba61903cc2518c3675ef2cf380a7e54bde7ea20e6fa1ab45b7617346cd11b7610001ee6ae5b0155c41cad9527cbcdff44ec67848943a4 -address: 5b073e9233944b5e729e46d618f0d8edf3d9c34a".to_owned(); - let secret = - Secret::copy_from_str(&"a100df7a048e50ed308ea696dc600215098141cb391e9527329df289f9383f65").unwrap(); - let kp = KeyPair::from_secret(secret).unwrap(); - assert_eq!(format!("{}", kp), expected); - } -} diff --git a/parity-crypto/src/publickey/keypair_generator.rs b/parity-crypto/src/publickey/keypair_generator.rs deleted file mode 100644 index 6ebef0985..000000000 --- a/parity-crypto/src/publickey/keypair_generator.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
Random key pair generator. Relies on the secp256k1 C-library to generate random data. - -use super::{Generator, KeyPair}; -use secp256k1::SECP256K1; - -/// Randomly generates new keypair, instantiating the RNG each time. -pub struct Random; - -impl Generator for Random { - fn generate(&mut self) -> KeyPair { - let (sec, publ) = SECP256K1.generate_keypair(&mut secp256k1::rand::thread_rng()); - KeyPair::from_keypair(sec, publ) - } -} diff --git a/parity-crypto/src/publickey/mod.rs b/parity-crypto/src/publickey/mod.rs deleted file mode 100644 index c7981515c..000000000 --- a/parity-crypto/src/publickey/mod.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Submodule of crypto utils for working with public key crypto primitives -//! If you are looking for git history please refer to the `ethkey` crate in the `parity-ethereum` repository. - -mod ecdsa_signature; -mod extended_keys; -mod keypair; -mod keypair_generator; -mod secret_key; - -pub mod ec_math_utils; -pub mod ecdh; -pub mod ecies; -pub mod error; - -pub use self::{ - ecdsa_signature::{recover, sign, verify_address, verify_public, Signature}, - error::Error, - extended_keys::{Derivation, DerivationError, ExtendedKeyPair, ExtendedPublic, ExtendedSecret}, - keypair::{public_to_address, KeyPair}, - keypair_generator::Random, - secret_key::{Secret, ZeroizeSecretKey}, -}; - -use ethereum_types::H256; - -pub use ethereum_types::{Address, Public}; -pub type Message = H256; - -/// The number -1 encoded as a secret key -const MINUS_ONE_KEY: &'static [u8] = &[ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, - 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40, -]; - -/// Generates new keypair. 
-pub trait Generator { - /// Should be called to generate new keypair. - fn generate(&mut self) -> KeyPair; -} diff --git a/parity-crypto/src/publickey/secret_key.rs b/parity-crypto/src/publickey/secret_key.rs deleted file mode 100644 index 7f5692f1d..000000000 --- a/parity-crypto/src/publickey/secret_key.rs +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Secret key implementation. - -use std::fmt; -use std::ops::Deref; -use std::str::FromStr; - -use ethereum_types::H256; -use secp256k1::constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE; -use secp256k1::key; -use zeroize::Zeroize; - -use crate::publickey::Error; - -/// Represents secret key -#[derive(Clone, PartialEq, Eq)] -pub struct Secret { - inner: Box, -} - -impl Drop for Secret { - fn drop(&mut self) { - self.inner.0.zeroize() - } -} - -impl fmt::LowerHex for Secret { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(fmt) - } -} - -impl fmt::Debug for Secret { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(fmt) - } -} - -impl fmt::Display for Secret { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "Secret: 0x{:x}{:x}..{:x}{:x}", self.inner[0], self.inner[1], self.inner[30], self.inner[31]) - } -} - -impl Secret { - /// Creates a `Secret` from the given slice, returning `None` if the slice length != 32. - /// Caller is responsible to zeroize input slice. - pub fn copy_from_slice(key: &[u8]) -> Option { - if key.len() != 32 { - return None; - } - let mut h = H256::zero(); - h.as_bytes_mut().copy_from_slice(&key[0..32]); - Some(Secret { inner: Box::new(h) }) - } - - /// Creates a `Secret` from the given `str` representation, - /// returning an error for hex big endian representation of - /// the secret. 
- /// Caller is responsible to zeroize input slice. - pub fn copy_from_str(s: &str) -> Result { - let h = H256::from_str(s).map_err(|e| Error::Custom(format!("{:?}", e)))?; - Ok(Secret { inner: Box::new(h) }) - } - - /// Creates zero key, which is invalid for crypto operations, but valid for math operation. - pub fn zero() -> Self { - Secret { inner: Box::new(H256::zero()) } - } - - /// Imports and validates the key. - /// Caller is responsible to zeroize input slice. - pub fn import_key(key: &[u8]) -> Result { - let secret = key::SecretKey::from_slice(key)?; - Ok(secret.into()) - } - - /// Checks validity of this key. - pub fn check_validity(&self) -> Result<(), Error> { - self.to_secp256k1_secret().map(|_| ()) - } - - /// Wrapper over hex conversion - pub fn to_hex(&self) -> String { - format!("{:x}", self.inner.deref()) - } - - /// Inplace add one secret key to another (scalar + scalar) - pub fn add(&mut self, other: &Secret) -> Result<(), Error> { - match (self.is_zero(), other.is_zero()) { - (true, true) | (false, true) => Ok(()), - (true, false) => { - *self = other.clone(); - Ok(()) - } - (false, false) => { - let mut key_secret = self.to_secp256k1_secret()?; - let other_secret = other.to_secp256k1_secret()?; - key_secret.add_assign(&other_secret[..])?; - *self = key_secret.into(); - ZeroizeSecretKey(other_secret).zeroize(); - - Ok(()) - } - } - } - - /// Inplace subtract one secret key from another (scalar - scalar) - pub fn sub(&mut self, other: &Secret) -> Result<(), Error> { - match (self.is_zero(), other.is_zero()) { - (true, true) | (false, true) => Ok(()), - (true, false) => { - *self = other.clone(); - self.neg() - } - (false, false) => { - let mut key_secret = self.to_secp256k1_secret()?; - let mut other_secret = other.to_secp256k1_secret()?; - other_secret.mul_assign(super::MINUS_ONE_KEY)?; - key_secret.add_assign(&other_secret[..])?; - - *self = key_secret.into(); - ZeroizeSecretKey(other_secret).zeroize(); - Ok(()) - } - } - } - - /// Inplace 
decrease secret key (scalar - 1) - pub fn dec(&mut self) -> Result<(), Error> { - match self.is_zero() { - true => { - *self = Self::copy_from_slice(&super::MINUS_ONE_KEY) - .expect("Constructing a secret key from a known-good constant works; qed."); - Ok(()) - } - false => { - let mut key_secret = self.to_secp256k1_secret()?; - key_secret.add_assign(super::MINUS_ONE_KEY)?; - - *self = key_secret.into(); - Ok(()) - } - } - } - - /// Inplace multiply one secret key to another (scalar * scalar) - pub fn mul(&mut self, other: &Secret) -> Result<(), Error> { - match (self.is_zero(), other.is_zero()) { - (true, true) | (true, false) => Ok(()), - (false, true) => { - *self = Self::zero(); - Ok(()) - } - (false, false) => { - let mut key_secret = self.to_secp256k1_secret()?; - let other_secret = other.to_secp256k1_secret()?; - key_secret.mul_assign(&other_secret[..])?; - - *self = key_secret.into(); - ZeroizeSecretKey(other_secret).zeroize(); - Ok(()) - } - } - } - - /// Inplace negate secret key (-scalar) - pub fn neg(&mut self) -> Result<(), Error> { - match self.is_zero() { - true => Ok(()), - false => { - let mut key_secret = self.to_secp256k1_secret()?; - key_secret.mul_assign(super::MINUS_ONE_KEY)?; - - *self = key_secret.into(); - Ok(()) - } - } - } - - /// Compute power of secret key inplace (secret ^ pow). - pub fn pow(&mut self, pow: usize) -> Result<(), Error> { - if self.is_zero() { - return Ok(()); - } - - match pow { - 0 => *self = key::ONE_KEY.into(), - 1 => (), - _ => { - let c = self.clone(); - for _ in 1..pow { - self.mul(&c)?; - } - } - } - - Ok(()) - } - - /// Create a `secp256k1::key::SecretKey` based on this secret. - /// Warning the resulting secret key need to be zeroized manually. 
- pub fn to_secp256k1_secret(&self) -> Result { - key::SecretKey::from_slice(&self[..]).map_err(Into::into) - } -} - -impl From<[u8; 32]> for Secret { - #[inline(always)] - fn from(mut k: [u8; 32]) -> Self { - let result = Secret { inner: Box::new(H256(k)) }; - k.zeroize(); - result - } -} - -impl From for Secret { - #[inline(always)] - fn from(mut s: H256) -> Self { - let result = s.0.into(); - s.0.zeroize(); - result - } -} - -impl From for Secret { - #[inline(always)] - fn from(key: key::SecretKey) -> Self { - let mut a = [0; SECP256K1_SECRET_KEY_SIZE]; - a.copy_from_slice(&key[0..SECP256K1_SECRET_KEY_SIZE]); - ZeroizeSecretKey(key).zeroize(); - a.into() - } -} - -impl Deref for Secret { - type Target = H256; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -/// A wrapper type around `SecretKey` to prevent leaking secret key data. This -/// type will properly zeroize the secret key to `ONE_KEY` in a way that will -/// not get optimized away by the compiler nor be prone to leaks that take -/// advantage of access reordering. 
-#[derive(Clone, Copy)] -pub struct ZeroizeSecretKey(pub secp256k1::SecretKey); - -impl Default for ZeroizeSecretKey { - fn default() -> Self { - ZeroizeSecretKey(secp256k1::key::ONE_KEY) - } -} - -impl std::ops::Deref for ZeroizeSecretKey { - type Target = secp256k1::SecretKey; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl zeroize::DefaultIsZeroes for ZeroizeSecretKey {} - -#[cfg(test)] -mod tests { - use super::super::{Generator, Random}; - use super::Secret; - - #[test] - fn secret_pow() { - let secret = Random.generate().secret().clone(); - - let mut pow0 = secret.clone(); - pow0.pow(0).unwrap(); - assert_eq!( - pow0, - Secret::copy_from_str(&"0000000000000000000000000000000000000000000000000000000000000001").unwrap() - ); - - let mut pow1 = secret.clone(); - pow1.pow(1).unwrap(); - assert_eq!(pow1, secret); - - let mut pow2 = secret.clone(); - pow2.pow(2).unwrap(); - let mut pow2_expected = secret.clone(); - pow2_expected.mul(&secret).unwrap(); - assert_eq!(pow2, pow2_expected); - - let mut pow3 = secret.clone(); - pow3.pow(3).unwrap(); - let mut pow3_expected = secret.clone(); - pow3_expected.mul(&secret).unwrap(); - pow3_expected.mul(&secret).unwrap(); - assert_eq!(pow3, pow3_expected); - } -} diff --git a/parity-crypto/src/scrypt.rs b/parity-crypto/src/scrypt.rs deleted file mode 100644 index 15b7e14e1..000000000 --- a/parity-crypto/src/scrypt.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::{KEY_LENGTH, KEY_LENGTH_AES}; -use crate::error::ScryptError; -use scrypt::{scrypt, ScryptParams}; - -#[cfg(test)] -use std::io::Error; - -pub fn derive_key(pass: &[u8], salt: &[u8], n: u32, p: u32, r: u32) -> Result<(Vec, Vec), ScryptError> { - // sanity checks - let log_n = (32 - n.leading_zeros() - 1) as u8; - if log_n as u32 >= r * 16 { - return Err(ScryptError::InvalidN); - } - - if p as u64 > ((u32::max_value() as u64 - 1) * 32) / (128 * (r as u64)) { - return Err(ScryptError::InvalidP); - } - - let mut derived_key = vec![0u8; KEY_LENGTH]; - let scrypt_params = ScryptParams::new(log_n, r, p)?; - scrypt(pass, salt, &scrypt_params, &mut derived_key)?; - let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; - let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; - Ok((derived_right_bits.to_vec(), derived_left_bits.to_vec())) -} - -// test is build from previous crypto lib behaviour, values may be incorrect -// if previous crypto lib got a bug. -#[test] -pub fn test_derive() -> Result<(), Error> { - let pass = [109, 121, 112, 97, 115, 115, 10]; - let salt = [ - 109, 121, 115, 97, 108, 116, 115, 104, 111, 117, 108, 100, 102, 105, 108, 108, 115, 111, 109, 109, 101, 98, - 121, 116, 101, 108, 101, 110, 103, 116, 104, 10, - ]; - let r1 = [93, 134, 79, 68, 223, 27, 44, 174, 236, 184, 179, 203, 74, 139, 73, 66]; - let r2 = [2, 24, 239, 131, 172, 164, 18, 171, 132, 207, 22, 217, 150, 20, 203, 37]; - let l1 = [6, 90, 119, 45, 67, 2, 99, 151, 81, 88, 166, 210, 244, 19, 123, 208]; - let l2 = [253, 123, 132, 12, 188, 89, 196, 2, 107, 224, 239, 231, 135, 177, 125, 62]; - - let (l, r) = derive_key(&pass[..], &salt, 262, 1, 8).unwrap(); - assert!(l == r1); - assert!(r == l1); - let (l, r) = derive_key(&pass[..], &salt, 144, 4, 4).unwrap(); - assert!(l == r2); - assert!(r == l2); - Ok(()) -} From a9dbe62c7b5dad5f33ae09c8877f791c0647f4ec Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 30 Jul 2021 15:20:40 +0200 Subject: [PATCH 228/359] rlp: 
fix bool encoding/decoding (#572) * rlp: fix bool encoding/decoding * rlp: update the changelog --- rlp/CHANGELOG.md | 1 + rlp/src/impls.rs | 12 +++++++----- rlp/tests/tests.rs | 11 ++++++++++- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index afd84d95c..189ff85f0 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Fix rlp encoding/decoding for bool. [#572](https://github.com/paritytech/parity-common/pull/572) ## [0.5.0] - 2021-01-05 ### Breaking diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index 7aa013925..aeedb1134 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -48,17 +48,19 @@ impl Decodable for Box { impl Encodable for bool { fn rlp_append(&self, s: &mut RlpStream) { - s.encoder().encode_iter(once(if *self { 1u8 } else { 0 })); + let as_uint = u8::from(*self); + Encodable::rlp_append(&as_uint, s); } } impl Decodable for bool { fn decode(rlp: &Rlp) -> Result { - rlp.decoder().decode_value(|bytes| match bytes.len() { + let as_uint = ::decode(rlp)?; + match as_uint { 0 => Ok(false), - 1 => Ok(bytes[0] != 0), - _ => Err(DecoderError::RlpIsTooBig), - }) + 1 => Ok(true), + _ => Err(DecoderError::Custom("invalid boolean value")), + } } } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 9ba2e3592..ec5ed6221 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -619,10 +619,19 @@ fn test_rlp_is_int() { for b in 0xb8..0xc0 { let data: Vec = vec![b]; let rlp = Rlp::new(&data); - assert_eq!(rlp.is_int(), false); + assert!(!rlp.is_int()); } } +#[test] +fn test_bool_same_as_int() { + assert_eq!(rlp::encode(&false), rlp::encode(&0x00u8)); + assert_eq!(rlp::encode(&true), rlp::encode(&0x01u8)); + let two = rlp::encode(&0x02u8); + let invalid: Result = rlp::decode(&two); + invalid.unwrap_err(); +} + // test described in // // 
https://github.com/paritytech/parity-common/issues/49 From faad8b6f63799d00c2f54189a056b405140295d3 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 30 Jul 2021 15:39:23 +0200 Subject: [PATCH 229/359] update rustfmt config to match substrate (#570) * update the rustfmt to the substrate one * apply rustfmt * go back to 120 limit --- .github/workflows/ci.yml | 2 +- ethbloom/src/lib.rs | 8 +- ethereum-types/src/hash.rs | 7 +- fixed-hash/benches/cmp.rs | 3 +- fixed-hash/src/hash.rs | 5 +- keccak-hash/src/lib.rs | 8 +- kvdb-memorydb/src/lib.rs | 30 +++---- kvdb-rocksdb/benches/bench_read_perf.rs | 10 ++- kvdb-rocksdb/examples/memtest.rs | 9 +- kvdb-rocksdb/src/iter.rs | 3 +- kvdb-rocksdb/src/lib.rs | 86 +++++++++++++------- kvdb-rocksdb/src/stats.rs | 10 ++- kvdb/src/io_stats.rs | 18 ++-- kvdb/src/lib.rs | 3 +- parity-bytes/src/lib.rs | 4 +- parity-util-mem/derive/lib.rs | 4 +- parity-util-mem/src/allocators.rs | 40 ++++----- parity-util-mem/src/malloc_size.rs | 18 ++-- primitive-types/impls/rlp/src/lib.rs | 2 +- primitive-types/impls/serde/src/serialize.rs | 24 +++--- primitive-types/src/lib.rs | 8 +- rlp-derive/src/de.rs | 2 +- rlp-derive/src/en.rs | 7 +- rlp/src/impls.rs | 30 ++++--- rlp/src/lib.rs | 10 +-- rlp/src/rlpin.rs | 38 ++++----- rlp/src/stream.rs | 22 ++--- rlp/tests/tests.rs | 5 +- rustfmt.toml | 21 ++++- uint/tests/uint_tests.rs | 22 ++--- 30 files changed, 263 insertions(+), 196 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 84a12b31c..93a736514 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -154,7 +154,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable + toolchain: nightly override: true - run: rustup component add rustfmt - uses: actions-rs/cargo@v1 diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index aa8993aae..ecda1a332 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -6,7 +6,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -//! //! ``` //! use hex_literal::hex; //! use ethbloom::{Bloom, Input}; @@ -46,7 +45,6 @@ //! assert!(my_bloom.contains_input(Input::Raw(&topic))); //! assert_eq!(my_bloom, bloom); //! ``` -//! #![cfg_attr(not(feature = "std"), no_std)] @@ -80,7 +78,7 @@ impl_fixed_hash_codec!(Bloom, BLOOM_SIZE); /// Returns log2. fn log2(x: usize) -> u32 { if x <= 1 { - return 0; + return 0 } let n = x.leading_zeros(); @@ -106,7 +104,7 @@ impl<'a> From> for Hash<'a> { keccak256.update(raw); keccak256.finalize(&mut out); Hash::Owned(out) - } + }, Input::Hash(hash) => Hash::Ref(hash), } } @@ -248,7 +246,7 @@ impl<'a> BloomRef<'a> { let a = self.0[i]; let b = bloom_ref.0[i]; if (a & b) != b { - return false; + return false } } true diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 596f0fede..bd459568c 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -46,8 +46,7 @@ impl_fixed_hash_serde!(H128, 16); #[cfg(feature = "codec")] impl_fixed_hash_codec!(H128, 16); -pub use primitive_types::H160; -pub use primitive_types::H256; +pub use primitive_types::{H160, H256}; construct_fixed_hash! { pub struct H264(33); } #[cfg(feature = "rlp")] @@ -136,7 +135,9 @@ mod tests { #[test] fn test_parse_0x() { - assert!("0x0000000000000000000000000000000000000000000000000000000000000000".parse::().is_ok()) + assert!("0x0000000000000000000000000000000000000000000000000000000000000000" + .parse::() + .is_ok()) } #[test] diff --git a/fixed-hash/benches/cmp.rs b/fixed-hash/benches/cmp.rs index fd8918006..62c71db00 100644 --- a/fixed-hash/benches/cmp.rs +++ b/fixed-hash/benches/cmp.rs @@ -8,8 +8,7 @@ //! Benchmarks for fixed-hash cmp implementation. 
-use criterion::{black_box, Criterion, ParameterizedBenchmark}; -use criterion::{criterion_group, criterion_main}; +use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark}; use fixed_hash::construct_fixed_hash; diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index bc64993b5..3d0ca6990 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -595,7 +595,7 @@ macro_rules! impl_rustc_hex_for_fixed_hash { *byte = iter.next().ok_or(Self::Err::InvalidHexLength)??; } if iter.next().is_some() { - return Err(Self::Err::InvalidHexLength); + return Err(Self::Err::InvalidHexLength) } Ok(result) } @@ -778,7 +778,8 @@ macro_rules! impl_fixed_hash_conversions { ); let mut ret = $small_ty::zero(); - ret.as_bytes_mut().copy_from_slice(&value[(large_ty_size - small_ty_size)..large_ty_size]); + ret.as_bytes_mut() + .copy_from_slice(&value[(large_ty_size - small_ty_size)..large_ty_size]); ret } } diff --git a/keccak-hash/src/lib.rs b/keccak-hash/src/lib.rs index e01f6156d..a3c128e85 100644 --- a/keccak-hash/src/lib.rs +++ b/keccak-hash/src/lib.rs @@ -129,7 +129,7 @@ pub fn keccak_pipe(r: &mut dyn io::BufRead, w: &mut dyn io::Write) -> Result None, - Some(map) => { - map.iter().find(|&(ref k, _)| k.starts_with(prefix)).map(|(_, v)| v.to_vec().into_boxed_slice()) - } + Some(map) => map + .iter() + .find(|&(ref k, _)| k.starts_with(prefix)) + .map(|(_, v)| v.to_vec().into_boxed_slice()), } } @@ -57,17 +58,15 @@ impl KeyValueDB for InMemory { let ops = transaction.ops; for op in ops { match op { - DBOp::Insert { col, key, value } => { + DBOp::Insert { col, key, value } => if let Some(col) = columns.get_mut(&col) { col.insert(key.into_vec(), value); - } - } - DBOp::Delete { col, key } => { + }, + DBOp::Delete { col, key } => if let Some(col) = columns.get_mut(&col) { col.remove(&*key); - } - } - DBOp::DeletePrefix { col, prefix } => { + }, + DBOp::DeletePrefix { col, prefix } => if let Some(col) = columns.get_mut(&col) { use 
std::ops::Bound; if prefix.is_empty() { @@ -75,7 +74,9 @@ impl KeyValueDB for InMemory { } else { let start_range = Bound::Included(prefix.to_vec()); let keys: Vec<_> = if let Some(end_range) = kvdb::end_prefix(&prefix[..]) { - col.range((start_range, Bound::Excluded(end_range))).map(|(k, _)| k.clone()).collect() + col.range((start_range, Bound::Excluded(end_range))) + .map(|(k, _)| k.clone()) + .collect() } else { col.range((start_range, Bound::Unbounded)).map(|(k, _)| k.clone()).collect() }; @@ -83,8 +84,7 @@ impl KeyValueDB for InMemory { col.remove(&key[..]); } } - } - } + }, } } Ok(()) @@ -94,7 +94,9 @@ impl KeyValueDB for InMemory { match self.columns.read().get(&col) { Some(map) => Box::new( // TODO: worth optimizing at all? - map.clone().into_iter().map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())), + map.clone() + .into_iter() + .map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())), ), None => Box::new(None.into_iter()), } diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs index 5a956020b..78e8b773f 100644 --- a/kvdb-rocksdb/benches/bench_read_perf.rs +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -20,8 +20,10 @@ const NEEDLES: usize = 10_000; const NEEDLES_TO_HAYSTACK_RATIO: usize = 100; -use std::io; -use std::time::{Duration, Instant}; +use std::{ + io, + time::{Duration, Instant}, +}; use alloc_counter::{count_alloc, AllocCounterSystem}; use criterion::{black_box, criterion_group, criterion_main, Criterion}; @@ -53,7 +55,9 @@ fn n_random_bytes(n: usize) -> Vec { let variability: i64 = rng.gen_range(0..(n / 5) as i64); let plus_or_minus: i64 = if variability % 2 == 0 { 1 } else { -1 }; let range = Uniform::from(0..u8::max_value()); - rng.sample_iter(&range).take((n as i64 + plus_or_minus * variability) as usize).collect() + rng.sample_iter(&range) + .take((n as i64 + plus_or_minus * variability) as usize) + .collect() } /// Writes `NEEDLES * NEEDLES_TO_HAYSTACK_RATIO` keys to the DB. 
Keys are random, 32 bytes long and diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 54c031c5f..6c5c30bae 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -20,7 +20,10 @@ use ethereum_types::H256; use keccak_hash::keccak; use kvdb_rocksdb::{Database, DatabaseConfig}; -use std::sync::{atomic::AtomicBool, atomic::Ordering as AtomicOrdering, Arc}; +use std::sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, +}; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; const COLUMN_COUNT: u32 = 100; @@ -67,7 +70,9 @@ fn proc_memory_usage() -> u64 { let self_pid = get_current_pid().ok(); let memory = if let Some(self_pid) = self_pid { if sys.refresh_process(self_pid) { - let proc = sys.get_process(self_pid).expect("Above refresh_process succeeds, this should be Some(), qed"); + let proc = sys + .get_process(self_pid) + .expect("Above refresh_process succeeds, this should be Some(), qed"); proc.memory() } else { 0 diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index ba8c20e34..818099ca4 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -124,6 +124,7 @@ impl<'a> IterationHandler for &'a DBAndColumns { } fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator { - self.db.iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) + self.db + .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index a21f8aed1..03a4eafe5 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -110,11 +110,11 @@ impl CompactionProfile { if file.read_exact(&mut buffer).is_ok() { // 0 means not rotational. if buffer == [48] { - return Self::ssd(); + return Self::ssd() } // 1 means rotational. 
if buffer == [49] { - return Self::hdd(); + return Self::hdd() } } } @@ -197,7 +197,9 @@ impl DatabaseConfig { /// Returns the total memory budget in bytes. pub fn memory_budget(&self) -> MiB { - (0..self.columns).map(|i| self.memory_budget.get(&i).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB).sum() + (0..self.columns) + .map(|i| self.memory_budget.get(&i).unwrap_or(&DB_DEFAULT_COLUMN_MEMORY_BUDGET_MB) * MB) + .sum() } /// Returns the memory budget of the specified column in bytes. @@ -261,7 +263,9 @@ impl MallocSizeOf for DBAndColumns { impl DBAndColumns { fn cf(&self, i: usize) -> &ColumnFamily { - self.db.cf_handle(&self.column_names[i]).expect("the specified column name is correct; qed") + self.db + .cf_handle(&self.column_names[i]) + .expect("the specified column name is correct; qed") } fn static_property_or_warn(&self, col: usize, prop: &str) -> usize { @@ -270,7 +274,7 @@ impl DBAndColumns { _ => { warn!("Cannot read expected static property of RocksDb database: {}", prop); 0 - } + }, } } } @@ -307,8 +311,9 @@ fn check_for_corruption>(path: P, res: result::Result bool { - err.as_ref().starts_with("Corruption:") - || err.as_ref().starts_with("Invalid argument: You have to open all column families") + err.as_ref().starts_with("Corruption:") || + err.as_ref() + .starts_with("Invalid argument: You have to open all column families") } /// Generate the options for RocksDB, based on the given `DatabaseConfig`. @@ -436,10 +441,10 @@ impl Database { .map_err(other_io_err)?; } Ok(db) - } + }, err => err, } - } + }, ok => ok, }; @@ -456,7 +461,7 @@ impl Database { .collect(); DB::open_cf_descriptors(&opts, path, cf_descriptors).map_err(other_io_err)? - } + }, Err(s) => return Err(other_io_err(s)), }) } @@ -477,7 +482,7 @@ impl Database { warn!("DB corrupted: {}, attempting repair", s); DB::repair(&opts, path).map_err(other_io_err)?; DB::open_cf_as_secondary(&opts, path, secondary_path, column_names).map_err(other_io_err)? 
- } + }, Err(s) => return Err(other_io_err(s)), }) } @@ -506,12 +511,12 @@ impl Database { DBOp::Insert { col: _, key, value } => { stats_total_bytes += key.len() + value.len(); batch.put_cf(cf, &key, &value); - } + }, DBOp::Delete { col: _, key } => { // We count deletes as writes. stats_total_bytes += key.len(); batch.delete_cf(cf, &key); - } + }, DBOp::DeletePrefix { col, prefix } => { let end_prefix = kvdb::end_prefix(&prefix[..]); let no_end = end_prefix.is_none(); @@ -528,13 +533,13 @@ impl Database { batch.delete_cf(cf, &key[..]); } } - } + }, }; } self.stats.tally_bytes_written(stats_total_bytes as u64); check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) - } + }, None => Err(other_io_err("Database is closed")), } } @@ -544,7 +549,7 @@ impl Database { match *self.db.read() { Some(ref cfs) => { if cfs.column_names.get(col as usize).is_none() { - return Err(other_io_err("column index is out of bounds")); + return Err(other_io_err("column index is out of bounds")) } self.stats.tally_reads(1); let value = cfs @@ -556,11 +561,11 @@ impl Database { match value { Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), Ok(None) => self.stats.tally_bytes_read(key.len() as u64), - _ => {} + _ => {}, }; value - } + }, None => Ok(None), } } @@ -618,23 +623,23 @@ impl Database { Ok(_) => { // ignore errors let _ = fs::remove_dir_all(new_db); - } + }, Err(err) => { debug!("DB atomic swap failed: {}", err); match swap_nonatomic(new_db, &self.path) { Ok(_) => { // ignore errors let _ = fs::remove_dir_all(new_db); - } + }, Err(err) => { warn!("Failed to swap DB directories: {:?}", err); return Err(io::Error::new( io::ErrorKind::Other, "DB restoration failed: could not swap DB directories", - )); - } + )) + }, } - } + }, } // reopen the database and steal handles into self @@ -663,7 +668,7 @@ impl Database { Ok(estimate) => Ok(estimate.unwrap_or_default()), Err(err_string) => Err(other_io_err(err_string)), } - } + }, None => 
Ok(0), } } @@ -676,7 +681,7 @@ impl Database { db.drop_cf(&name).map_err(other_io_err)?; } Ok(()) - } + }, None => Ok(()), } } @@ -691,7 +696,7 @@ impl Database { let _ = db.create_cf(&name, &col_config).map_err(other_io_err)?; column_names.push(name); Ok(()) - } + }, None => Ok(()), } } @@ -863,7 +868,12 @@ mod tests { db.write(transaction)?; let config = DatabaseConfig { - secondary: TempfileBuilder::new().prefix("").tempdir()?.path().to_str().map(|s| s.to_string()), + secondary: TempfileBuilder::new() + .prefix("") + .tempdir()? + .path() + .to_str() + .map(|s| s.to_string()), ..DatabaseConfig::with_columns(1) }; let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; @@ -878,7 +888,12 @@ mod tests { let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; let config = DatabaseConfig { - secondary: TempfileBuilder::new().prefix("").tempdir()?.path().to_str().map(|s| s.to_string()), + secondary: TempfileBuilder::new() + .prefix("") + .tempdir()? 
+ .path() + .to_str() + .map(|s| s.to_string()), ..DatabaseConfig::with_columns(1) }; let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; @@ -1067,7 +1082,10 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 cfg.compaction.initial_file_size = 102030; cfg.memory_budget = [(0, 30), (1, 300)].iter().cloned().collect(); - let db_path = TempfileBuilder::new().prefix("config_test").tempdir().expect("the OS can create tmp dirs"); + let db_path = TempfileBuilder::new() + .prefix("config_test") + .tempdir() + .expect("the OS can create tmp dirs"); let db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) .expect("rocksdb creates a LOG file"); @@ -1099,7 +1117,10 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 let include_indexes = settings.matches("cache_index_and_filter_blocks: 1").collect::>().len(); assert_eq!(include_indexes, NUM_COLS); // Pin index/filters on L0 - let pins = settings.matches("pin_l0_filter_and_index_blocks_in_cache: 1").collect::>().len(); + let pins = settings + .matches("pin_l0_filter_and_index_blocks_in_cache: 1") + .collect::>() + .len(); assert_eq!(pins, NUM_COLS); // Check target file size, aka initial file size @@ -1113,7 +1134,10 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 // All columns use Snappy assert_eq!(snappy_compression, NUM_COLS + 1); // …even for L7 - let snappy_bottommost = settings.matches("Options.bottommost_compression: Disabled").collect::>().len(); + let snappy_bottommost = settings + .matches("Options.bottommost_compression: Disabled") + .collect::>() + .len(); assert_eq!(snappy_bottommost, NUM_COLS + 1); // 7 levels diff --git a/kvdb-rocksdb/src/stats.rs b/kvdb-rocksdb/src/stats.rs index c028b1948..ca7c4888d 100644 --- 
a/kvdb-rocksdb/src/stats.rs +++ b/kvdb-rocksdb/src/stats.rs @@ -7,10 +7,12 @@ // except according to those terms. use parking_lot::RwLock; -use std::collections::HashMap; -use std::str::FromStr; -use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; -use std::time::Instant; +use std::{ + collections::HashMap, + str::FromStr, + sync::atomic::{AtomicU64, Ordering as AtomicOrdering}, + time::Instant, +}; #[derive(Default, Clone, Copy)] pub struct RawDbStats { diff --git a/kvdb/src/io_stats.rs b/kvdb/src/io_stats.rs index 2f10dc707..9c1a89f97 100644 --- a/kvdb/src/io_stats.rs +++ b/kvdb/src/io_stats.rs @@ -58,7 +58,7 @@ impl IoStats { /// Average batch (transaction) size (writes per transaction) pub fn avg_batch_size(&self) -> f64 { if self.writes == 0 { - return 0.0; + return 0.0 } self.transactions as f64 / self.writes as f64 } @@ -66,7 +66,7 @@ impl IoStats { /// Read operations per second. pub fn reads_per_sec(&self) -> f64 { if self.span.as_secs_f64() == 0.0 { - return 0.0; + return 0.0 } self.reads as f64 / self.span.as_secs_f64() @@ -74,7 +74,7 @@ impl IoStats { pub fn byte_reads_per_sec(&self) -> f64 { if self.span.as_secs_f64() == 0.0 { - return 0.0; + return 0.0 } self.bytes_read as f64 / self.span.as_secs_f64() @@ -83,7 +83,7 @@ impl IoStats { /// Write operations per second. pub fn writes_per_sec(&self) -> f64 { if self.span.as_secs_f64() == 0.0 { - return 0.0; + return 0.0 } self.writes as f64 / self.span.as_secs_f64() @@ -91,7 +91,7 @@ impl IoStats { pub fn byte_writes_per_sec(&self) -> f64 { if self.span.as_secs_f64() == 0.0 { - return 0.0; + return 0.0 } self.bytes_written as f64 / self.span.as_secs_f64() @@ -100,7 +100,7 @@ impl IoStats { /// Total number of operations per second. pub fn ops_per_sec(&self) -> f64 { if self.span.as_secs_f64() == 0.0 { - return 0.0; + return 0.0 } (self.writes as f64 + self.reads as f64) / self.span.as_secs_f64() @@ -109,7 +109,7 @@ impl IoStats { /// Transactions per second. 
pub fn transactions_per_sec(&self) -> f64 { if self.span.as_secs_f64() == 0.0 { - return 0.0; + return 0.0 } (self.transactions as f64) / self.span.as_secs_f64() @@ -117,7 +117,7 @@ impl IoStats { pub fn avg_transaction_size(&self) -> f64 { if self.transactions == 0 { - return 0.0; + return 0.0 } self.bytes_written as f64 / self.transactions as f64 @@ -125,7 +125,7 @@ impl IoStats { pub fn cache_hit_ratio(&self) -> f64 { if self.reads == 0 { - return 0.0; + return 0.0 } self.cache_reads as f64 / self.reads as f64 diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index f4d553583..7693439b9 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -71,7 +71,8 @@ impl DBTransaction { /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. pub fn put(&mut self, col: u32, key: &[u8], value: &[u8]) { - self.ops.push(DBOp::Insert { col, key: DBKey::from_slice(key), value: value.to_vec() }) + self.ops + .push(DBOp::Insert { col, key: DBKey::from_slice(key), value: value.to_vec() }) } /// Insert a key-value pair in the transaction. Any existing value will be overwritten upon write. 
diff --git a/parity-bytes/src/lib.rs b/parity-bytes/src/lib.rs index e269776d4..9314d854c 100644 --- a/parity-bytes/src/lib.rs +++ b/parity-bytes/src/lib.rs @@ -83,12 +83,12 @@ impl<'a> BytesRef<'a> { data.resize(offset, 0); data.extend_from_slice(input); wrote - } + }, BytesRef::Fixed(ref mut data) if offset < data.len() => { let max = min(data.len() - offset, input.len()); data[offset..(max + offset)].copy_from_slice(&input[..max]); max - } + }, _ => 0, } } diff --git a/parity-util-mem/derive/lib.rs b/parity-util-mem/derive/lib.rs index 2331b0aaa..445e367a6 100644 --- a/parity-util-mem/derive/lib.rs +++ b/parity-util-mem/derive/lib.rs @@ -55,7 +55,9 @@ fn malloc_size_of_derive(s: synstructure::Structure) -> proc_macro2::TokenStream let mut where_clause = where_clause.unwrap_or(&parse_quote!(where)).clone(); for param in ast.generics.type_params() { let ident = ¶m.ident; - where_clause.predicates.push(parse_quote!(#ident: parity_util_mem::MallocSizeOf)); + where_clause + .predicates + .push(parse_quote!(#ident: parity_util_mem::MallocSizeOf)); } let tokens = quote! { diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index 1eadf701b..ba2fbffde 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -9,29 +9,29 @@ //! default allocator management //! Features are: //! - windows: -//! - no features: default implementation from servo `heapsize` crate -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - jemalloc: default windows allocator is used instead -//! - mimalloc: use mimallocator crate +//! - no features: default implementation from servo `heapsize` crate +//! - weealloc: default to `estimate_size` +//! - dlmalloc: default to `estimate_size` +//! - jemalloc: default windows allocator is used instead +//! - mimalloc: use mimallocator crate //! - arch x86: -//! - no features: use default alloc -//! - jemalloc: use jemallocator crate -//! 
- weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - mimalloc: use mimallocator crate +//! - no features: use default alloc +//! - jemalloc: use jemallocator crate +//! - weealloc: default to `estimate_size` +//! - dlmalloc: default to `estimate_size` +//! - mimalloc: use mimallocator crate //! - arch x86/macos: -//! - no features: use default alloc, requires using `estimate_size` -//! - jemalloc: use jemallocator crate -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - mimalloc: use mimallocator crate +//! - no features: use default alloc, requires using `estimate_size` +//! - jemalloc: use jemallocator crate +//! - weealloc: default to `estimate_size` +//! - dlmalloc: default to `estimate_size` +//! - mimalloc: use mimallocator crate //! - arch wasm32: -//! - no features: default to `estimate_size` -//! - weealloc: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - jemalloc: compile error -//! - mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) +//! - no features: default to `estimate_size` +//! - weealloc: default to `estimate_size` +//! - dlmalloc: default to `estimate_size` +//! - jemalloc: compile error +//! 
- mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) #[cfg(feature = "std")] use crate::malloc_size::MallocUnconditionalSizeOf; diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs index 2e8a73eb7..907726674 100644 --- a/parity-util-mem/src/malloc_size.rs +++ b/parity-util-mem/src/malloc_size.rs @@ -70,10 +70,11 @@ pub use alloc::boxed::Box; use core::ffi::c_void; #[cfg(feature = "std")] use rstd::hash::Hash; -use rstd::marker::PhantomData; -use rstd::mem::size_of; -use rstd::ops::Range; -use rstd::ops::{Deref, DerefMut}; +use rstd::{ + marker::PhantomData, + mem::size_of, + ops::{Deref, DerefMut, Range}, +}; #[cfg(feature = "std")] use std::hash::BuildHasher; #[cfg(feature = "std")] @@ -125,7 +126,7 @@ impl MallocSizeOfOps { // larger than the required alignment, but small enough that it is // always in the first page of memory and therefore not a legitimate // address. - return ptr as *const usize as usize <= 256; + return ptr as *const usize as usize <= 256 } /// Call `size_of_op` on `ptr`, first checking that the allocation isn't @@ -535,8 +536,8 @@ where // trait bounds are ever allowed, this code should be uncommented. 
// (We do have a compile-fail test for this: // rc_arc_must_not_derive_malloc_size_of.rs) -//impl !MallocSizeOf for Arc { } -//impl !MallocShallowSizeOf for Arc { } +// impl !MallocSizeOf for Arc { } +// impl !MallocShallowSizeOf for Arc { } #[cfg(feature = "std")] fn arc_ptr(s: &Arc) -> *const T { @@ -797,8 +798,7 @@ malloc_size_of_is_0!(std::time::Duration); mod tests { use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; use smallvec::SmallVec; - use std::collections::BTreeSet; - use std::mem; + use std::{collections::BTreeSet, mem}; impl_smallvec!(3); #[test] diff --git a/primitive-types/impls/rlp/src/lib.rs b/primitive-types/impls/rlp/src/lib.rs index e542c6e6e..71382a303 100644 --- a/primitive-types/impls/rlp/src/lib.rs +++ b/primitive-types/impls/rlp/src/lib.rs @@ -64,7 +64,7 @@ macro_rules! impl_fixed_hash_rlp { let mut t = [0u8; $size]; t.copy_from_slice(bytes); Ok($name(t)) - } + }, }) } } diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 90e42e2a6..4f18afc92 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -6,10 +6,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use alloc::string::String; -use alloc::vec::Vec; -use core::fmt; -use core::result::Result; +use alloc::{string::String, vec::Vec}; +use core::{fmt, result::Result}; use serde::{de, Deserializer, Serializer}; static CHARS: &[u8] = b"0123456789abcdef"; @@ -25,12 +23,12 @@ pub fn to_hex(bytes: &[u8], skip_leading_zero: bool) -> String { let non_zero = bytes.iter().take_while(|b| **b == 0).count(); let bytes = &bytes[non_zero..]; if bytes.is_empty() { - return "0x0".into(); + return "0x0".into() } else { bytes } } else if bytes.is_empty() { - return "0x".into(); + return "0x".into() } else { bytes }; @@ -96,7 +94,7 @@ impl fmt::Display for FromHexError { /// or non-hex characters are present. 
pub fn from_hex(v: &str) -> Result, FromHexError> { if !v.starts_with("0x") { - return Err(FromHexError::MissingPrefix); + return Err(FromHexError::MissingPrefix) } let mut bytes = vec![0u8; (v.len() - 1) / 2]; @@ -124,12 +122,12 @@ fn from_hex_raw<'a>(v: &str, bytes: &mut [u8]) -> Result { b'0'..=b'9' => buf |= byte - b'0', b' ' | b'\r' | b'\n' | b'\t' => { buf >>= 4; - continue; - } + continue + }, b => { let character = char::from(b); - return Err(FromHexError::InvalidHex { character, index }); - } + return Err(FromHexError::InvalidHex { character, index }) + }, } modulus += 1; @@ -244,7 +242,7 @@ where fn visit_str(self, v: &str) -> Result { if !v.starts_with("0x") { - return Err(E::custom(FromHexError::MissingPrefix)); + return Err(E::custom(FromHexError::MissingPrefix)) } let len = v.len(); @@ -254,7 +252,7 @@ where }; if !is_len_valid { - return Err(E::invalid_length(v.len() - 2, &self)); + return Err(E::invalid_length(v.len() - 2, &self)) } let bytes = match self.len { diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index 133b05c7e..b2f019802 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -161,7 +161,7 @@ impl TryFrom for U128 { fn try_from(value: U256) -> Result { let U256(ref arr) = value; if arr[2] | arr[3] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 2]; ret[0] = arr[0]; @@ -176,7 +176,7 @@ impl TryFrom for U256 { fn try_from(value: U512) -> Result { let U512(ref arr) = value; if arr[4] | arr[5] | arr[6] | arr[7] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 4]; ret[0] = arr[0]; @@ -193,7 +193,7 @@ impl TryFrom for U128 { fn try_from(value: U512) -> Result { let U512(ref arr) = value; if arr[2] | arr[3] | arr[4] | arr[5] | arr[6] | arr[7] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 2]; ret[0] = arr[0]; @@ -240,7 +240,7 @@ impl<'a> TryFrom<&'a U512> for U256 { fn try_from(value: 
&'a U512) -> Result { let U512(ref arr) = *value; if arr[4] | arr[5] | arr[6] | arr[7] != 0 { - return Err(Error::Overflow); + return Err(Error::Overflow) } let mut ret = [0; 4]; ret[0] = arr[0]; diff --git a/rlp-derive/src/de.rs b/rlp-derive/src/de.rs index 730c39270..87f5e3a81 100644 --- a/rlp-derive/src/de.rs +++ b/rlp-derive/src/de.rs @@ -126,7 +126,7 @@ fn decodable_field( panic!("only 1 #[rlp(default)] attribute is allowed in a struct") } match attr.parse_args() { - Ok(proc_macro2::TokenTree::Ident(ident)) if ident == "default" => {} + Ok(proc_macro2::TokenTree::Ident(ident)) if ident == "default" => {}, _ => panic!("only #[rlp(default)] attribute is supported"), } *default_attribute_encountered = true; diff --git a/rlp-derive/src/en.rs b/rlp-derive/src/en.rs index 9c21bebd2..e33c530c7 100644 --- a/rlp-derive/src/en.rs +++ b/rlp-derive/src/en.rs @@ -16,7 +16,12 @@ pub fn impl_encodable(ast: &syn::DeriveInput) -> TokenStream { panic!("#[derive(RlpEncodable)] is only defined for structs."); }; - let stmts: Vec<_> = body.fields.iter().enumerate().map(|(i, field)| encodable_field(i, field)).collect(); + let stmts: Vec<_> = body + .fields + .iter() + .enumerate() + .map(|(i, field)| encodable_field(i, field)) + .collect(); let name = &ast.ident; let stmts_len = stmts.len(); diff --git a/rlp/src/impls.rs b/rlp/src/impls.rs index aeedb1134..750ed5c28 100644 --- a/rlp/src/impls.rs +++ b/rlp/src/impls.rs @@ -9,19 +9,23 @@ #[cfg(not(feature = "std"))] use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec}; use bytes::{Bytes, BytesMut}; -use core::iter::{empty, once}; -use core::{mem, str}; - -use crate::error::DecoderError; -use crate::rlpin::Rlp; -use crate::stream::RlpStream; -use crate::traits::{Decodable, Encodable}; +use core::{ + iter::{empty, once}, + mem, str, +}; + +use crate::{ + error::DecoderError, + rlpin::Rlp, + stream::RlpStream, + traits::{Decodable, Encodable}, +}; pub fn decode_usize(bytes: &[u8]) -> Result { match bytes.len() { l if l <= 
mem::size_of::() => { if bytes[0] == 0 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } let mut res = 0usize; for (i, byte) in bytes.iter().enumerate().take(l) { @@ -29,7 +33,7 @@ pub fn decode_usize(bytes: &[u8]) -> Result { res += (*byte as usize) << shift; } Ok(res) - } + }, _ => Err(DecoderError::RlpIsTooBig), } } @@ -114,11 +118,11 @@ where match *self { None => { s.begin_list(0); - } + }, Some(ref value) => { s.begin_list(1); s.append(value); - } + }, } } } @@ -178,7 +182,7 @@ macro_rules! impl_decodable_for_u { 0 | 1 => u8::decode(rlp).map(|v| v as $name), l if l <= mem::size_of::<$name>() => { if bytes[0] == 0 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } let mut res = 0 as $name; for (i, byte) in bytes.iter().enumerate().take(l) { @@ -186,7 +190,7 @@ macro_rules! impl_decodable_for_u { res += (*byte as $name) << shift; } Ok(res) - } + }, _ => Err(DecoderError::RlpIsTooBig), }) } diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index 135b3b75c..f8ed6a9de 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -10,23 +10,23 @@ //! //! Allows encoding, decoding, and view onto rlp-slice //! -//!# What should you use when? +//! # What should you use when? //! -//!### Use `encode` function when: +//! ### Use `encode` function when: //! * You want to encode something inline. //! * You do not work on big set of data. //! * You want to encode whole data structure at once. //! -//!### Use `decode` function when: +//! ### Use `decode` function when: //! * You want to decode something inline. //! * You do not work on big set of data. //! * You want to decode whole rlp at once. //! -//!### Use `RlpStream` when: +//! ### Use `RlpStream` when: //! * You want to encode something in portions. //! * You encode a big set of data. //! -//!### Use `Rlp` when: +//! ### Use `Rlp` when: //! * You need to handle data corruption errors. //! * You are working on input data. //! 
* You want to get view onto rlp-slice. diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 5ffaa89b7..53b8731fc 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -8,14 +8,11 @@ #[cfg(not(feature = "std"))] use alloc::{string::String, vec::Vec}; -use core::cell::Cell; -use core::fmt; +use core::{cell::Cell, fmt}; use rustc_hex::ToHex; -use crate::error::DecoderError; -use crate::impls::decode_usize; -use crate::traits::Decodable; +use crate::{error::DecoderError, impls::decode_usize, traits::Decodable}; /// rlp offset #[derive(Copy, Clone, Debug)] @@ -58,11 +55,11 @@ fn calculate_payload_info(header_bytes: &[u8], len_of_len: usize) -> Result (), } if header_bytes.len() < header_len { - return Err(DecoderError::RlpIsTooShort); + return Err(DecoderError::RlpIsTooShort) } let value_len = decode_usize(&header_bytes[1..header_len])?; if value_len <= 55 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } Ok(PayloadInfo::new(header_len, value_len)) } @@ -121,7 +118,7 @@ impl<'a> fmt::Display for Rlp<'a> { } write!(f, "{}", self.at(len - 1).unwrap())?; write!(f, "]") - } + }, Err(err) => write!(f, "{:?}", err), } } @@ -170,7 +167,7 @@ impl<'a> Rlp<'a> { let c = self.iter().count(); self.count_cache.set(Some(c)); Ok(c) - } + }, } } else { Err(DecoderError::RlpExpectedToBeList) @@ -206,20 +203,19 @@ impl<'a> Rlp<'a> { 'a: 'view, { if !self.is_list() { - return Err(DecoderError::RlpExpectedToBeList); + return Err(DecoderError::RlpExpectedToBeList) } // move to cached position if its index is less or equal to // current search index, otherwise move to beginning of list let cache = self.offset_cache.get(); let (bytes, indexes_to_skip, bytes_consumed) = match cache { - Some(ref cache) if cache.index <= index => { - (Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset) - } + Some(ref cache) if cache.index <= index => + (Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset), _ 
=> { let (bytes, consumed) = self.consume_list_payload()?; (bytes, index, consumed) - } + }, }; // skip up to x items @@ -252,7 +248,7 @@ impl<'a> Rlp<'a> { pub fn is_int(&self) -> bool { if self.is_null() { - return false; + return false } match self.bytes[0] { @@ -261,7 +257,7 @@ impl<'a> Rlp<'a> { b @ 0xb8..=0xbf => { let payload_idx = 1 + b as usize - 0xb7; payload_idx < self.bytes.len() && self.bytes[payload_idx] != 0 - } + }, _ => false, } } @@ -309,7 +305,7 @@ impl<'a> Rlp<'a> { fn consume_list_payload(&self) -> Result<(&'a [u8], usize), DecoderError> { let item = BasicDecoder::payload_info(self.bytes)?; if self.bytes.len() < (item.header_len + item.value_len) { - return Err(DecoderError::RlpIsTooShort); + return Err(DecoderError::RlpIsTooShort) } Ok((&self.bytes[item.header_len..item.header_len + item.value_len], item.header_len)) } @@ -406,24 +402,24 @@ impl<'a> BasicDecoder<'a> { } else if l <= 0xb7 { let last_index_of = 1 + l as usize - 0x80; if bytes.len() < last_index_of { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData) } let d = &bytes[1..last_index_of]; if l == 0x81 && d[0] < 0x80 { - return Err(DecoderError::RlpInvalidIndirection); + return Err(DecoderError::RlpInvalidIndirection) } Ok(f(d)?) } else if l <= 0xbf { let len_of_len = l as usize - 0xb7; let begin_of_value = 1 as usize + len_of_len; if bytes.len() < begin_of_value { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData) } let len = decode_usize(&bytes[1..begin_of_value])?; let last_index_of_value = begin_of_value.checked_add(len).ok_or(DecoderError::RlpInvalidLength)?; if bytes.len() < last_index_of_value { - return Err(DecoderError::RlpInconsistentLengthAndData); + return Err(DecoderError::RlpInconsistentLengthAndData) } Ok(f(&bytes[begin_of_value..last_index_of_value])?) 
} else { diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index 607dc9344..d1b7f9a8c 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -182,7 +182,7 @@ impl RlpStream { self.buffer.put_u8(0xc0u8); self.note_appended(1); self.finished_list = true; - } + }, _ => { // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data @@ -190,7 +190,7 @@ impl RlpStream { let position = self.total_written(); self.unfinished_lists.push(ListInfo::new(position, Some(len))); - } + }, } // return chainable self @@ -212,7 +212,7 @@ impl RlpStream { /// Appends raw (pre-serialised) RLP data. Checks for size overflow. pub fn append_raw_checked(&mut self, bytes: &[u8], item_count: usize, max_size: usize) -> bool { if self.estimate_size(bytes.len()) > max_size { - return false; + return false } self.append_raw(bytes, item_count); true @@ -297,7 +297,7 @@ impl RlpStream { /// Try to finish lists fn note_appended(&mut self, inserted_items: usize) { if self.unfinished_lists.is_empty() { - return; + return } let back = self.unfinished_lists.len() - 1; @@ -310,7 +310,7 @@ impl RlpStream { Some(ref max) => x.current == *max, _ => false, } - } + }, }; if should_finish { let x = self.unfinished_lists.pop().unwrap(); @@ -370,11 +370,11 @@ impl<'a> BasicEncoder<'a> { match len { 0..=55 => { self.buffer[self.start_pos + pos - 1] = 0xc0u8 + len as u8; - } + }, _ => { let inserted_bytes = self.insert_size(len, pos); self.buffer[self.start_pos + pos - 1] = 0xf7u8 + inserted_bytes; - } + }, }; } @@ -392,8 +392,8 @@ impl<'a> BasicEncoder<'a> { (lower, Some(upper)) if lower == upper => lower, _ => { let value = value.collect::>(); - return self.encode_iter(value); - } + return self.encode_iter(value) + }, }; match len { // just 0 @@ -409,7 +409,7 @@ impl<'a> BasicEncoder<'a> { self.buffer.put_u8(first); self.buffer.extend(value); } - } + }, // (prefix + length of length), followed by the length, followd by the string len => { 
self.buffer.put_u8(0); @@ -417,7 +417,7 @@ impl<'a> BasicEncoder<'a> { let inserted_bytes = self.insert_size(len, position); self.buffer[self.start_pos + position - 1] = 0xb7 + inserted_bytes; self.buffer.extend(value); - } + }, } } } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index ec5ed6221..a5eface28 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -686,7 +686,10 @@ fn test_nested_list_roundtrip() { impl Encodable for Inner { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_unbounded_list().append(&self.0).append(&self.1).finalize_unbounded_list(); + s.begin_unbounded_list() + .append(&self.0) + .append(&self.1) + .finalize_unbounded_list(); } } diff --git a/rustfmt.toml b/rustfmt.toml index c699603f5..4c2c9e8d8 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,4 +1,23 @@ +# https://github.com/paritytech/substrate/blob/master/rustfmt.toml +# Basic hard_tabs = true max_width = 120 use_small_heuristics = "Max" -edition = "2018" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +normalize_comments = true +normalize_doc_attributes = true +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 57ad732cd..7c9159658 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -6,9 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use core::convert::TryInto; -use core::str::FromStr; -use core::u64::MAX; +use core::{convert::TryInto, str::FromStr, u64::MAX}; use crunchy::unroll; use uint::{construct_uint, overflowing, FromDecStrErr}; @@ -46,7 +44,7 @@ fn const_matching_works() { const ONE: U256 = U256([1, 0, 0, 0]); match U256::zero() { ONE => unreachable!(), - _ => {} + _ => {}, } } @@ -446,9 +444,11 @@ fn uint512_mul() { #[test] fn uint256_mul_overflow() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap().overflowing_mul( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - ), + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + .unwrap() + .overflowing_mul( + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() + ), (U256::from_str("1").unwrap(), true) ); } @@ -457,8 +457,8 @@ fn uint256_mul_overflow() { #[should_panic] #[allow(unused_must_use)] fn uint256_mul_overflow_panic() { - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - * U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() * + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); } #[test] @@ -499,8 +499,8 @@ fn uint256_shl_words() { #[test] fn uint256_mul() { assert_eq!( - U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() - * U256::from_str("2").unwrap(), + U256::from_str("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap() * + U256::from_str("2").unwrap(), U256::from_str("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe").unwrap() ); } From bd6bb46ff5da1f17502efcd28e29e4933b02926e Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Fri, 30 Jul 2021 15:39:37 +0200 
Subject: [PATCH 230/359] bump rocksdb to 6.20.3 (#573) --- kvdb-rocksdb/CHANGELOG.md | 1 + kvdb-rocksdb/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 495612f0b..225809e84 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Bumped `rocksdb` to 0.17. [#573](https://github.com/paritytech/parity-common/pull/573) ### Breaking - Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index e14608731..a500771c1 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -19,7 +19,7 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.11.1" regex = "1.3.1" -rocksdb = { version = "0.16", features = ["snappy"], default-features = false } +rocksdb = { version = "0.17", features = ["snappy"], default-features = false } owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std", "smallvec"] } From 916a3820f97a52d2fecd0b644bdf2546e801858b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Sat, 31 Jul 2021 12:48:39 +0200 Subject: [PATCH 231/359] release rlp 0.5.1 and kvdb-rocksdb 0.12.1 (#575) * rlp: release 0.5.1 * kvdb-rocksdb: release 0.12.1 --- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 2 +- rlp/CHANGELOG.md | 2 ++ rlp/Cargo.toml | 2 +- 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 225809e84..148f0815d 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,8 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.12.1] - 2021-07-30 - Bumped `rocksdb` to 0.17. 
[#573](https://github.com/paritytech/parity-common/pull/573) +## [0.12.0] - 2021-07-02 ### Breaking - Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) - Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index a500771c1..400ae7f52 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.12.0" +version = "0.12.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 189ff85f0..cf4695203 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.5.1] - 2021-07-30 - Fix rlp encoding/decoding for bool. 
[#572](https://github.com/paritytech/parity-common/pull/572) ## [0.5.0] - 2021-01-05 diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 3898e2b3c..6f346b5ea 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.5.0" +version = "0.5.1" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" From 8a1739af6e5a77584a5ba711a99b1c3debc45ce2 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 4 Aug 2021 17:20:33 +0200 Subject: [PATCH 232/359] add to kvdb-rocksdb create_if_missing config option (#576) * add to kvdb-rocksdb create_if_missing config option * add #[non_exhaustive] above the DatabaseConfig * update the changelog * fix the date Co-authored-by: Andronik Ordian --- kvdb-rocksdb/CHANGELOG.md | 5 +++++ kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/src/lib.rs | 10 ++++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 148f0815d..a1594f876 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.13.0] - 2021-08-04 +### Breaking +- `DatabaseConfig` is now `#[non_exhaustive]`. [#576](https://github.com/paritytech/parity-common/pull/576) +- Added `create_if_missing` to `DatabaseConfig`. [#576](https://github.com/paritytech/parity-common/pull/576) + ## [0.12.1] - 2021-07-30 - Bumped `rocksdb` to 0.17. 
[#573](https://github.com/paritytech/parity-common/pull/573) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 400ae7f52..c4c7588ca 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.12.1" +version = "0.13.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 03a4eafe5..9089d4cf0 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -142,6 +142,7 @@ impl CompactionProfile { /// Database configuration #[derive(Clone)] +#[non_exhaustive] pub struct DatabaseConfig { /// Max number of open files. pub max_open_files: i32, @@ -180,6 +181,9 @@ pub struct DatabaseConfig { /// Limit the size (in bytes) of write ahead logs /// More info: https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log pub max_total_wal_size: Option, + /// Creates a new database if no database exists. + /// Set to `true` by default for backwards compatibility. 
+ pub create_if_missing: bool, } impl DatabaseConfig { @@ -233,6 +237,7 @@ impl Default for DatabaseConfig { enable_statistics: false, secondary: None, max_total_wal_size: None, + create_if_missing: true, } } } @@ -325,7 +330,7 @@ fn generate_options(config: &DatabaseConfig) -> Options { opts.enable_statistics(); } opts.set_use_fsync(false); - opts.create_if_missing(true); + opts.create_if_missing(config.create_if_missing); if config.secondary.is_some() { opts.set_max_open_files(-1) } else { @@ -376,7 +381,7 @@ fn generate_block_based_options(config: &DatabaseConfig) -> io::Result Date: Thu, 5 Aug 2021 15:15:57 +0200 Subject: [PATCH 233/359] kvdb api uses now template argument `P: AsRef` instead of `&str` (#579) * update kvdb_rocksdb to 0.14 and use P: AsRef Path instead of str and Path as Database function arguments * remove duplicate import of PathBuf on linux --- kvdb-rocksdb/CHANGELOG.md | 4 ++ kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/examples/memtest.rs | 2 +- kvdb-rocksdb/src/lib.rs | 97 +++++++++++++++----------------- 4 files changed, 51 insertions(+), 54 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index a1594f876..195922bea 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.14.0] - 2021-08-05 +### Breaking +- `Database` api uses now template argument `P: AsRef` instead of `&str` [#579](https://github.com/paritytech/parity-common/pull/579) + ## [0.13.0] - 2021-08-04 ### Breaking - `DatabaseConfig` is now `#[non_exhaustive]`. 
[#576](https://github.com/paritytech/parity-common/pull/576) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index c4c7588ca..fc5470c8b 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.13.0" +version = "0.14.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 6c5c30bae..51b9d4807 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -107,7 +107,7 @@ fn main() { let dir = tempfile::Builder::new().prefix("rocksdb-example").tempdir().unwrap(); println!("Database is put in: {} (maybe check if it was deleted)", dir.path().to_string_lossy()); - let db = Database::open(&config, &dir.path().to_string_lossy()).unwrap(); + let db = Database::open(&config, &dir.path()).unwrap(); let mut step = 0; let mut keyvalues = KeyValueSeed::new(); diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 9089d4cf0..055f9d6f1 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -9,7 +9,14 @@ mod iter; mod stats; -use std::{cmp, collections::HashMap, convert::identity, error, fs, io, mem, path::Path, result}; +use std::{ + cmp, + collections::HashMap, + convert::identity, + error, fs, io, mem, + path::{Path, PathBuf}, + result, +}; use parity_util_mem::MallocSizeOf; use parking_lot::RwLock; @@ -27,8 +34,6 @@ use regex::Regex; #[cfg(target_os = "linux")] use std::fs::File; #[cfg(target_os = "linux")] -use std::path::PathBuf; -#[cfg(target_os = "linux")] use std::process::Command; fn other_io_err(e: E) -> io::Error @@ -96,9 +101,10 @@ pub fn rotational_from_df_output(df_out: Vec) -> Option { impl CompactionProfile { /// Attempt to determine the best profile automatically, only Linux for now. 
#[cfg(target_os = "linux")] - pub fn auto(db_path: &Path) -> CompactionProfile { + pub fn auto>(db_path: P) -> CompactionProfile { use std::io::Read; let hdd_check_file = db_path + .as_ref() .to_str() .and_then(|path_str| Command::new("df").arg(path_str).output().ok()) .and_then(|df_res| if df_res.status.success() { Some(df_res.stdout) } else { None }) @@ -125,7 +131,7 @@ impl CompactionProfile { /// Just default for other platforms. #[cfg(not(target_os = "linux"))] - pub fn auto(_db_path: &Path) -> CompactionProfile { + pub fn auto>(_db_path: P) -> CompactionProfile { Self::default() } @@ -177,7 +183,7 @@ pub struct DatabaseConfig { /// May have a negative performance impact on the secondary instance /// if the secondary instance reads and applies state changes before the primary instance compacts them. /// More info: https://github.com/facebook/rocksdb/wiki/Secondary-instance - pub secondary: Option, + pub secondary: Option, /// Limit the size (in bytes) of write ahead logs /// More info: https://github.com/facebook/rocksdb/wiki/Write-Ahead-Log pub max_total_wal_size: Option, @@ -290,7 +296,8 @@ pub struct Database { db: RwLock>, #[ignore_malloc_size_of = "insignificant"] config: DatabaseConfig, - path: String, + #[ignore_malloc_size_of = "insignificant"] + path: PathBuf, #[ignore_malloc_size_of = "insignificant"] opts: Options, #[ignore_malloc_size_of = "insignificant"] @@ -386,17 +393,17 @@ impl Database { /// # Safety /// /// The number of `config.columns` must not be zero. 
- pub fn open(config: &DatabaseConfig, path: &str) -> io::Result { + pub fn open>(config: &DatabaseConfig, path: P) -> io::Result { assert!(config.columns > 0, "the number of columns must not be zero"); let opts = generate_options(config); let block_opts = generate_block_based_options(config)?; // attempt database repair if it has been previously marked as corrupted - let db_corrupted = Path::new(path).join(Database::CORRUPTION_FILE_NAME); + let db_corrupted = path.as_ref().join(Database::CORRUPTION_FILE_NAME); if db_corrupted.exists() { warn!("DB has been previously marked as corrupted, attempting repair"); - DB::repair(&opts, path).map_err(other_io_err)?; + DB::repair(&opts, path.as_ref()).map_err(other_io_err)?; fs::remove_file(db_corrupted)?; } @@ -405,16 +412,16 @@ impl Database { let read_opts = generate_read_options(); let db = if let Some(secondary_path) = &config.secondary { - Self::open_secondary(&opts, path, secondary_path.as_str(), column_names.as_slice())? + Self::open_secondary(&opts, path.as_ref(), secondary_path.as_ref(), column_names.as_slice())? } else { let column_names: Vec<&str> = column_names.iter().map(|s| s.as_str()).collect(); - Self::open_primary(&opts, path, config, column_names.as_slice(), &block_opts)? + Self::open_primary(&opts, path.as_ref(), config, column_names.as_slice(), &block_opts)? }; Ok(Database { db: RwLock::new(Some(DBAndColumns { db, column_names })), config: config.clone(), - path: path.to_owned(), + path: path.as_ref().to_owned(), opts, read_opts, write_opts, @@ -424,9 +431,9 @@ impl Database { } /// Internal api to open a database in primary mode. 
- fn open_primary( + fn open_primary>( opts: &Options, - path: &str, + path: P, config: &DatabaseConfig, column_names: &[&str], block_opts: &BlockBasedOptions, @@ -435,10 +442,10 @@ impl Database { .map(|i| ColumnFamilyDescriptor::new(column_names[i as usize], config.column_config(&block_opts, i))) .collect(); - let db = match DB::open_cf_descriptors(&opts, path, cf_descriptors) { + let db = match DB::open_cf_descriptors(&opts, path.as_ref(), cf_descriptors) { Err(_) => { // retry and create CFs - match DB::open_cf(&opts, path, &[] as &[&str]) { + match DB::open_cf(&opts, path.as_ref(), &[] as &[&str]) { Ok(mut db) => { for (i, name) in column_names.iter().enumerate() { let _ = db @@ -457,7 +464,7 @@ impl Database { Ok(db) => db, Err(ref s) if is_corrupted(s) => { warn!("DB corrupted: {}, attempting repair", s); - DB::repair(&opts, path).map_err(other_io_err)?; + DB::repair(&opts, path.as_ref()).map_err(other_io_err)?; let cf_descriptors: Vec<_> = (0..config.columns) .map(|i| { @@ -473,19 +480,19 @@ impl Database { /// Internal api to open a database in secondary mode. /// Secondary database needs a seperate path to store its own logs. - fn open_secondary( + fn open_secondary>( opts: &Options, - path: &str, - secondary_path: &str, + path: P, + secondary_path: P, column_names: &[String], ) -> io::Result { - let db = DB::open_cf_as_secondary(&opts, path, secondary_path, column_names); + let db = DB::open_cf_as_secondary(&opts, path.as_ref(), secondary_path.as_ref(), column_names); Ok(match db { Ok(db) => db, Err(ref s) if is_corrupted(s) => { warn!("DB corrupted: {}, attempting repair", s); - DB::repair(&opts, path).map_err(other_io_err)?; + DB::repair(&opts, path.as_ref()).map_err(other_io_err)?; DB::open_cf_as_secondary(&opts, path, secondary_path, column_names).map_err(other_io_err)? }, Err(s) => return Err(other_io_err(s)), @@ -620,18 +627,18 @@ impl Database { } /// Restore the database from a copy at given path. 
- pub fn restore(&self, new_db: &str) -> io::Result<()> { + pub fn restore>(&self, new_db: P) -> io::Result<()> { self.close(); // swap is guaranteed to be atomic - match swap(new_db, &self.path) { + match swap(new_db.as_ref(), &self.path) { Ok(_) => { // ignore errors - let _ = fs::remove_dir_all(new_db); + let _ = fs::remove_dir_all(new_db.as_ref()); }, Err(err) => { debug!("DB atomic swap failed: {}", err); - match swap_nonatomic(new_db, &self.path) { + match swap_nonatomic(new_db.as_ref(), &self.path) { Ok(_) => { // ignore errors let _ = fs::remove_dir_all(new_db); @@ -864,24 +871,17 @@ mod tests { #[test] fn secondary_db_get() -> io::Result<()> { let primary = TempfileBuilder::new().prefix("").tempdir()?; + let secondary = TempfileBuilder::new().prefix("").tempdir()?; let config = DatabaseConfig::with_columns(1); - let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + let db = Database::open(&config, primary.path()).unwrap(); let key1 = b"key1"; let mut transaction = db.transaction(); transaction.put(0, key1, b"horse"); db.write(transaction)?; - let config = DatabaseConfig { - secondary: TempfileBuilder::new() - .prefix("") - .tempdir()? 
- .path() - .to_str() - .map(|s| s.to_string()), - ..DatabaseConfig::with_columns(1) - }; - let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + let config = DatabaseConfig { secondary: Some(secondary.path().to_owned()), ..DatabaseConfig::with_columns(1) }; + let second_db = Database::open(&config, primary.path()).unwrap(); assert_eq!(&*second_db.get(0, key1)?.unwrap(), b"horse"); Ok(()) } @@ -889,19 +889,12 @@ mod tests { #[test] fn secondary_db_catch_up() -> io::Result<()> { let primary = TempfileBuilder::new().prefix("").tempdir()?; + let secondary = TempfileBuilder::new().prefix("").tempdir()?; let config = DatabaseConfig::with_columns(1); - let db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + let db = Database::open(&config, primary.path()).unwrap(); - let config = DatabaseConfig { - secondary: TempfileBuilder::new() - .prefix("") - .tempdir()? - .path() - .to_str() - .map(|s| s.to_string()), - ..DatabaseConfig::with_columns(1) - }; - let second_db = Database::open(&config, primary.path().to_str().expect("tempdir path is valid unicode"))?; + let config = DatabaseConfig { secondary: Some(secondary.path().to_owned()), ..DatabaseConfig::with_columns(1) }; + let second_db = Database::open(&config, primary.path()).unwrap(); let mut transaction = db.transaction(); transaction.put(0, b"key1", b"mule"); @@ -1008,7 +1001,7 @@ mod tests { // open 5, remove 4. 
{ - let db = Database::open(&config_5, tempdir.path().to_str().unwrap()).expect("open with 5 columns"); + let db = Database::open(&config_5, tempdir.path()).expect("open with 5 columns"); assert_eq!(db.num_columns(), 5); for i in (1..5).rev() { @@ -1028,7 +1021,7 @@ mod tests { fn test_num_keys() { let tempdir = TempfileBuilder::new().prefix("").tempdir().unwrap(); let config = DatabaseConfig::with_columns(1); - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); + let db = Database::open(&config, tempdir.path()).unwrap(); assert_eq!(db.num_keys(0).unwrap(), 0, "database is empty after creation"); let key1 = b"beef"; @@ -1092,7 +1085,7 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 .prefix("config_test") .tempdir() .expect("the OS can create tmp dirs"); - let db = Database::open(&cfg, db_path.path().to_str().unwrap()).expect("can open a db"); + let db = Database::open(&cfg, db_path.path()).expect("can open a db"); let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) .expect("rocksdb creates a LOG file"); let mut settings = String::new(); From 3030a6dd28352a4a12169340d22f130f331e0d12 Mon Sep 17 00:00:00 2001 From: Koute Date: Wed, 15 Sep 2021 18:51:24 +0900 Subject: [PATCH 234/359] Move memory stats gathering from `polkadot` to `parity-util-mem` (#588) * Move memory stats gathering from `polkadot` to `parity-util-mem` * Bump version to 0.10.1 * Update the CHANGELOG --- parity-util-mem/CHANGELOG.md | 5 +- parity-util-mem/Cargo.toml | 8 ++- parity-util-mem/src/lib.rs | 56 ++++++++++++++++++++ parity-util-mem/src/memory_stats_jemalloc.rs | 32 +++++++++++ parity-util-mem/src/memory_stats_noop.rs | 31 +++++++++++ 5 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 parity-util-mem/src/memory_stats_jemalloc.rs create mode 100644 parity-util-mem/src/memory_stats_noop.rs diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 
c5e13a77c..53f0c449c 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -4,7 +4,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.10.1] - 2021-09-15 +- Added support for memory stats gathering, ported over from `polkadot`. [#588](https://github.com/paritytech/parity-common/pull/588) + +## [0.10.0] - 2021-07-02 - Fixed `malloc_usable_size` for FreeBSD. [#553](https://github.com/paritytech/parity-common/pull/553) ### Breaking diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index ec90c9565..fac33b42c 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.10.0" +version = "0.10.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -37,6 +37,10 @@ winapi = { version = "0.3.8", features = ["heapapi"] } version = "0.3.2" optional = true +[target.'cfg(not(target_os = "windows"))'.dependencies.jemalloc-ctl] +version = "0.3.3" +optional = true + [features] default = ["std", "ethereum-impls", "lru", "hashbrown", "smallvec", "primitive-types"] std = ["parking_lot"] @@ -45,7 +49,7 @@ dlmalloc-global = ["dlmalloc", "estimate-heapsize"] # use wee_alloc as global allocator weealloc-global = ["wee_alloc", "estimate-heapsize"] # use jemalloc as global allocator -jemalloc-global = ["jemallocator"] +jemalloc-global = ["jemallocator", "jemalloc-ctl"] # use mimalloc as global allocator mimalloc-global = ["mimalloc", "libmimalloc-sys"] # implement additional types diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index bf6334e0e..5b3300069 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -24,14 +24,23 @@ cfg_if::cfg_if! 
{ /// Global allocator #[global_allocator] pub static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + + mod memory_stats_jemalloc; + use memory_stats_jemalloc as memory_stats; } else if #[cfg(feature = "dlmalloc-global")] { /// Global allocator #[global_allocator] pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; + + mod memory_stats_noop; + use memory_stats_noop as memory_stats; } else if #[cfg(feature = "weealloc-global")] { /// Global allocator #[global_allocator] pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; + + mod memory_stats_noop; + use memory_stats_noop as memory_stats; } else if #[cfg(all( feature = "mimalloc-global", not(target_arch = "wasm32") @@ -39,8 +48,13 @@ cfg_if::cfg_if! { /// Global allocator #[global_allocator] pub static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc; + + mod memory_stats_noop; + use memory_stats_noop as memory_stats; } else { // default allocator used + mod memory_stats_noop; + use memory_stats_noop as memory_stats; } } @@ -78,6 +92,48 @@ pub fn malloc_size(t: &T) -> usize { MallocSizeOf::size_of(t, &mut allocators::new_malloc_size_ops()) } +/// An error related to the memory stats gathering. +#[derive(Clone, Debug)] +pub struct MemoryStatsError(memory_stats::Error); + +#[cfg(feature = "std")] +impl std::fmt::Display for MemoryStatsError { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0.fmt(fmt) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for MemoryStatsError {} + +/// Snapshot of collected memory metrics. +#[non_exhaustive] +#[derive(Debug, Clone)] +pub struct MemoryAllocationSnapshot { + /// Total resident memory, in bytes. + pub resident: u64, + /// Total allocated memory, in bytes. + pub allocated: u64, +} + +/// Accessor to the allocator internals. 
+#[derive(Clone)] +pub struct MemoryAllocationTracker(self::memory_stats::MemoryAllocationTracker); + +impl MemoryAllocationTracker { + /// Create an instance of an allocation tracker. + pub fn new() -> Result { + self::memory_stats::MemoryAllocationTracker::new() + .map(MemoryAllocationTracker) + .map_err(MemoryStatsError) + } + + /// Create an allocation snapshot. + pub fn snapshot(&self) -> Result { + self.0.snapshot().map_err(MemoryStatsError) + } +} + #[cfg(feature = "std")] #[cfg(test)] mod test { diff --git a/parity-util-mem/src/memory_stats_jemalloc.rs b/parity-util-mem/src/memory_stats_jemalloc.rs new file mode 100644 index 000000000..854e932cf --- /dev/null +++ b/parity-util-mem/src/memory_stats_jemalloc.rs @@ -0,0 +1,32 @@ +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use jemalloc_ctl::Error; +use jemalloc_ctl::{epoch, stats}; + +#[derive(Clone)] +pub struct MemoryAllocationTracker { + epoch: jemalloc_ctl::epoch_mib, + allocated: stats::allocated_mib, + resident: stats::resident_mib, +} + +impl MemoryAllocationTracker { + pub fn new() -> Result { + Ok(Self { epoch: epoch::mib()?, allocated: stats::allocated::mib()?, resident: stats::resident::mib()? }) + } + + pub fn snapshot(&self) -> Result { + // update stats by advancing the allocation epoch + self.epoch.advance()?; + + let allocated: u64 = self.allocated.read()? as _; + let resident: u64 = self.resident.read()? 
as _; + Ok(crate::MemoryAllocationSnapshot { allocated, resident }) + } +} diff --git a/parity-util-mem/src/memory_stats_noop.rs b/parity-util-mem/src/memory_stats_noop.rs new file mode 100644 index 000000000..cf077c6f8 --- /dev/null +++ b/parity-util-mem/src/memory_stats_noop.rs @@ -0,0 +1,31 @@ +// Copyright 2021 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[derive(Clone, Debug)] +pub struct Unimplemented; +pub use Unimplemented as Error; + +#[cfg(feature = "std")] +impl std::fmt::Display for Unimplemented { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_str("unimplemented") + } +} + +#[derive(Clone)] +pub struct MemoryAllocationTracker {} + +impl MemoryAllocationTracker { + pub fn new() -> Result { + Err(Error) + } + + pub fn snapshot(&self) -> Result { + unimplemented!(); + } +} From 6f186f18ebbec550bb79c6384d55309b05e001a2 Mon Sep 17 00:00:00 2001 From: Koute Date: Mon, 20 Sep 2021 19:03:43 +0900 Subject: [PATCH 235/359] Switch from `jemallocator` to `tikv-jemallocator` (#589) * Switch from `jemallocator` to `tikv-jemallocator` * Bump `parity-util-mem` version to 0.10.2 --- parity-util-mem/Cargo.toml | 12 ++++++------ parity-util-mem/src/allocators.rs | 6 +++--- parity-util-mem/src/lib.rs | 2 +- parity-util-mem/src/memory_stats_jemalloc.rs | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index fac33b42c..6515cf8fb 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.10.1" +version = "0.10.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -33,12 +33,12 @@ primitive-types = { version = "0.10", 
path = "../primitive-types", default-featu [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } -[target.'cfg(not(target_os = "windows"))'.dependencies.jemallocator] -version = "0.3.2" +[target.'cfg(not(target_os = "windows"))'.dependencies.tikv-jemallocator] +version = "0.4.1" optional = true -[target.'cfg(not(target_os = "windows"))'.dependencies.jemalloc-ctl] -version = "0.3.3" +[target.'cfg(not(target_os = "windows"))'.dependencies.tikv-jemalloc-ctl] +version = "0.4.2" optional = true [features] @@ -49,7 +49,7 @@ dlmalloc-global = ["dlmalloc", "estimate-heapsize"] # use wee_alloc as global allocator weealloc-global = ["wee_alloc", "estimate-heapsize"] # use jemalloc as global allocator -jemalloc-global = ["jemallocator", "jemalloc-ctl"] +jemalloc-global = ["tikv-jemallocator", "tikv-jemalloc-ctl"] # use mimalloc as global allocator mimalloc-global = ["mimalloc", "libmimalloc-sys"] # implement additional types diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index ba2fbffde..b71ab5e0c 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -16,13 +16,13 @@ //! - mimalloc: use mimallocator crate //! - arch x86: //! - no features: use default alloc -//! - jemalloc: use jemallocator crate +//! - jemalloc: use tikv-jemallocator crate //! - weealloc: default to `estimate_size` //! - dlmalloc: default to `estimate_size` //! - mimalloc: use mimallocator crate //! - arch x86/macos: //! - no features: use default alloc, requires using `estimate_size` -//! - jemalloc: use jemallocator crate +//! - jemalloc: use tikv-jemallocator crate //! - weealloc: default to `estimate_size` //! - dlmalloc: default to `estimate_size` //! - mimalloc: use mimallocator crate @@ -85,7 +85,7 @@ mod usable_size { /// Use of jemalloc usable size C function through jemallocator crate call. 
pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - jemallocator::usable_size(ptr) + tikv_jemallocator::usable_size(ptr) } } else if #[cfg(feature = "mimalloc-global")] { diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 5b3300069..68771a2e0 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -23,7 +23,7 @@ cfg_if::cfg_if! { ))] { /// Global allocator #[global_allocator] - pub static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; + pub static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; mod memory_stats_jemalloc; use memory_stats_jemalloc as memory_stats; diff --git a/parity-util-mem/src/memory_stats_jemalloc.rs b/parity-util-mem/src/memory_stats_jemalloc.rs index 854e932cf..22081d64c 100644 --- a/parity-util-mem/src/memory_stats_jemalloc.rs +++ b/parity-util-mem/src/memory_stats_jemalloc.rs @@ -6,12 +6,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -pub use jemalloc_ctl::Error; -use jemalloc_ctl::{epoch, stats}; +pub use tikv_jemalloc_ctl::Error; +use tikv_jemalloc_ctl::{epoch, stats}; #[derive(Clone)] pub struct MemoryAllocationTracker { - epoch: jemalloc_ctl::epoch_mib, + epoch: tikv_jemalloc_ctl::epoch_mib, allocated: stats::allocated_mib, resident: stats::resident_mib, } From 8e990ac6f938307a7d8510a4ac30d3be6c8734fb Mon Sep 17 00:00:00 2001 From: Koute Date: Mon, 20 Sep 2021 19:25:57 +0900 Subject: [PATCH 236/359] Update the CHANGELOG (#590) --- parity-util-mem/CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 53f0c449c..3de621c77 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.10.2] - 2021-09-20 +- Switched from `jemallocator` to `tikv-jemallocator`. 
[#589](https://github.com/paritytech/parity-common/pull/589) + ## [0.10.1] - 2021-09-15 - Added support for memory stats gathering, ported over from `polkadot`. [#588](https://github.com/paritytech/parity-common/pull/588) From df638ab0885293d21d656dc300d39236b69ce57d Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Thu, 30 Sep 2021 12:16:29 -0400 Subject: [PATCH 237/359] Combine `scale-info` feature into `codec`, and wire it through `ethereum-types` (#593) * Start with Cargo.toml * better features, version, and ethbloom * feature * better feature * do bloom * hack H64 * remaining ethereum hash types * fix crate name in tests * revert breaking changes to primitive-types * Update primitive-types/tests/scale_info.rs Co-authored-by: Andronik Ordian Co-authored-by: Andronik Ordian --- ethbloom/Cargo.toml | 3 ++- ethbloom/src/lib.rs | 1 + ethereum-types/Cargo.toml | 3 ++- ethereum-types/src/hash.rs | 20 ++++++++++++++++---- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 61a759bad..dcce051e0 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -16,6 +16,7 @@ fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } [dev-dependencies] criterion = "0.3.0" @@ -29,7 +30,7 @@ serialize = ["impl-serde"] rustc-hex = ["fixed-hash/rustc-hex"] arbitrary = ["fixed-hash/arbitrary"] rlp = ["impl-rlp"] -codec = ["impl-codec"] +codec = ["impl-codec", "scale-info"] [[bench]] name = "bloom" diff --git a/ethbloom/src/lib.rs b/ethbloom/src/lib.rs index 
ecda1a332..4f22f3247 100644 --- a/ethbloom/src/lib.rs +++ b/ethbloom/src/lib.rs @@ -66,6 +66,7 @@ const BLOOM_SIZE: usize = 256; construct_fixed_hash! { /// Bloom hash type with 256 bytes (2048 bits) size. + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] pub struct Bloom(BLOOM_SIZE); } #[cfg(feature = "rlp")] diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index b0c124958..4bf4f8eab 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -15,6 +15,7 @@ primitive-types = { path = "../primitive-types", version = "0.10", features = [" impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } [dev-dependencies] serde_json = "1.0.41" @@ -25,5 +26,5 @@ std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std" serialize = ["impl-serde", "primitive-types/serde_no_std", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] rlp = ["impl-rlp", "ethbloom/rlp", "primitive-types/rlp"] -codec = ["impl-codec", "ethbloom/codec"] +codec = ["impl-codec", "ethbloom/codec", "scale-info", "primitive-types/scale-info"] num-traits = ["primitive-types/num-traits"] diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index bd459568c..4e4fd3b47 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -30,7 +30,10 @@ impl_fixed_hash_serde!(H32, 4); #[cfg(feature = "codec")] impl_fixed_hash_codec!(H32, 4); -construct_fixed_hash! { pub struct H64(8); } +construct_fixed_hash! 
{ + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H64(8); +} #[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H64, 8); #[cfg(feature = "serialize")] @@ -38,7 +41,10 @@ impl_fixed_hash_serde!(H64, 8); #[cfg(feature = "codec")] impl_fixed_hash_codec!(H64, 8); -construct_fixed_hash! { pub struct H128(16); } +construct_fixed_hash! { + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H128(16); +} #[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H128, 16); #[cfg(feature = "serialize")] @@ -48,7 +54,10 @@ impl_fixed_hash_codec!(H128, 16); pub use primitive_types::{H160, H256}; -construct_fixed_hash! { pub struct H264(33); } +construct_fixed_hash! { + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H264(33); +} #[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H264, 33); #[cfg(feature = "serialize")] @@ -58,7 +67,10 @@ impl_fixed_hash_codec!(H264, 33); pub use primitive_types::H512; -construct_fixed_hash! { pub struct H520(65); } +construct_fixed_hash! { + #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] + pub struct H520(65); +} #[cfg(feature = "rlp")] impl_fixed_hash_rlp!(H520, 65); #[cfg(feature = "serialize")] From aa5888340cd82f2dab2f2c4b5b65e1d0f4596482 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 10 Nov 2021 14:16:30 +0300 Subject: [PATCH 238/359] feat(primitive-types, serde): deserialize hex strings without 0x prefix (#598) * feat(primitive-types): deserialize hex strings without 0x prefix * change both to stripped * Revert "change both to stripped" This reverts commit 9d8d5320e4e3379ba892b418463de11dcd387329. 
* accept only stripped to from_hex_raw * cargo fmt * fix tests * oneliner for (v, stripped) * bump ethereum-types impl-serde version * remove unused lifetime * update changelog * allow deprecate * fix changelog date format (god bless america) --- ethereum-types/Cargo.toml | 2 +- ethereum-types/src/uint.rs | 21 ++++- ethereum-types/tests/serde.rs | 30 ++++++- parity-util-mem/derive/lib.rs | 2 +- primitive-types/impls/serde/CHANGELOG.md | 3 + primitive-types/impls/serde/Cargo.toml | 2 +- primitive-types/impls/serde/src/serialize.rs | 89 ++++++++++++++------ 7 files changed, 113 insertions(+), 36 deletions(-) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 4bf4f8eab..b11a8c1c3 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -12,7 +12,7 @@ ethbloom = { path = "../ethbloom", version = "0.11", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } primitive-types = { path = "../primitive-types", version = "0.10", features = ["byteorder", "rustc-hex"], default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.0", default-features = false, optional = true } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.2", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index eaf9bbf49..7b9b8f07e 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -58,12 +58,29 @@ mod tests { 
assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); } + let tests = vec![ + ($name::from(0), "0"), + ($name::from(1), "1"), + ($name::from(2), "2"), + ($name::from(10), "a"), + ($name::from(15), "f"), + ($name::from(15), "f"), + ($name::from(16), "10"), + ($name::from(1_000), "3e8"), + ($name::from(100_000), "186a0"), + ($name::from(u64::max_value()), "ffffffffffffffff"), + ($name::from(u64::max_value()) + 1, "10000000000000000"), + ]; + + for (number, expected) in tests { + assert_eq!(format!("{:?}", "0x".to_string() + expected), ser::to_string_pretty(&number).unwrap()); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + // Invalid examples assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } }; } diff --git a/ethereum-types/tests/serde.rs b/ethereum-types/tests/serde.rs index 15796c3c7..596d1e953 100644 --- a/ethereum-types/tests/serde.rs +++ b/ethereum-types/tests/serde.rs @@ -32,12 +32,29 @@ macro_rules! 
test { assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); } + let tests = vec![ + ($name::from(0), "0"), + ($name::from(1), "1"), + ($name::from(2), "2"), + ($name::from(10), "a"), + ($name::from(15), "f"), + ($name::from(15), "f"), + ($name::from(16), "10"), + ($name::from(1_000), "3e8"), + ($name::from(100_000), "186a0"), + ($name::from(u64::max_value()), "ffffffffffffffff"), + ($name::from(u64::max_value()) + $name::from(1u64), "10000000000000000"), + ]; + + for (number, expected) in tests { + assert_eq!(format!("{:?}", "0x".to_string() + expected), ser::to_string_pretty(&number).unwrap()); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + // Invalid examples assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } }; } @@ -109,8 +126,15 @@ fn test_invalid() { } #[test] -fn test_invalid_char() { +fn test_invalid_char_with_prefix() { const INVALID_STR: &str = "\"0x000000000000000000000000000000000000000000000000000000000000000g\""; const EXPECTED_MSG: &str = "invalid hex character: g, at 65 at line 1 column 68"; assert_eq!(ser::from_str::(INVALID_STR).unwrap_err().to_string(), EXPECTED_MSG); } + +#[test] +fn test_invalid_char_without_prefix() { + const INVALID_STR: &str = "\"000000000000000000000000000000000000000000000000000000000000000g\""; + const EXPECTED_MSG: &str = "invalid hex character: g, at 63 at line 1 column 66"; + assert_eq!(ser::from_str::(INVALID_STR).unwrap_err().to_string(), EXPECTED_MSG); +} diff --git a/parity-util-mem/derive/lib.rs b/parity-util-mem/derive/lib.rs index 445e367a6..fc28e0859 100644 --- a/parity-util-mem/derive/lib.rs +++ b/parity-util-mem/derive/lib.rs @@ -30,7 +30,7 @@ fn malloc_size_of_derive(s: 
synstructure::Structure) -> proc_macro2::TokenStream "#[ignore_malloc_size_of] should have an explanation, \ e.g. #[ignore_malloc_size_of = \"because reasons\"]" ); - } + }, syn::Meta::NameValue(syn::MetaNameValue { ref path, .. }) if path.is_ident("ignore_malloc_size_of") => true, _ => false, }); diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index 2c6acdd96..390cc1581 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.3.2] - 2021-11-10 +- Supported decoding of hex strings without `0x` prefix. [#598](https://github.com/paritytech/parity-common/pull/598) + ## [0.3.1] - 2020-05-05 - Added `no_std` support. [#385](https://github.com/paritytech/parity-common/pull/385) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index e75eeba43..28c1ae9d5 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.3.1" +version = "0.3.2" authors = ["Parity Technologies "] edition = "2018" license = "MIT OR Apache-2.0" diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 4f18afc92..ee3a57c05 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -66,6 +66,7 @@ fn to_hex_raw<'a>(v: &'a mut [u8], bytes: &[u8], skip_leading_zero: bool) -> &'a #[derive(Debug, PartialEq, Eq)] pub enum FromHexError { /// The `0x` prefix is missing. + #[deprecated(since = "0.3.2", note = "We support non 0x-prefixed hex strings")] MissingPrefix, /// Invalid (non-hex) character encountered. 
InvalidHex { @@ -82,38 +83,34 @@ impl std::error::Error for FromHexError {} impl fmt::Display for FromHexError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { + #[allow(deprecated)] Self::MissingPrefix => write!(fmt, "0x prefix is missing"), Self::InvalidHex { character, index } => write!(fmt, "invalid hex character: {}, at {}", character, index), } } } -/// Decode given hex string into a vector of bytes. +/// Decode given (both 0x-prefixed or not) hex string into a vector of bytes. /// -/// Returns an error if the string is not prefixed with `0x` -/// or non-hex characters are present. +/// Returns an error if non-hex characters are present. pub fn from_hex(v: &str) -> Result, FromHexError> { - if !v.starts_with("0x") { - return Err(FromHexError::MissingPrefix) - } + let (v, stripped) = v.strip_prefix("0x").map_or((v, false), |v| (v, true)); - let mut bytes = vec![0u8; (v.len() - 1) / 2]; - from_hex_raw(v, &mut bytes)?; + let mut bytes = vec![0u8; (v.len() + 1) / 2]; + from_hex_raw(v, &mut bytes, stripped)?; Ok(bytes) } -/// Decode given 0x-prefixed hex string into provided slice. +/// Decode given 0x-prefix-stripped hex string into provided slice. /// Used internally by `from_hex` and `deserialize_check_len`. /// -/// The method will panic if: -/// 1. `v` is shorter than 2 characters (you need to check 0x prefix outside). -/// 2. `bytes` have incorrect length (make sure to allocate enough beforehand). -fn from_hex_raw<'a>(v: &str, bytes: &mut [u8]) -> Result { - let bytes_len = v.len() - 2; +/// The method will panic if `bytes` have incorrect length (make sure to allocate enough beforehand). 
+fn from_hex_raw(v: &str, bytes: &mut [u8], stripped: bool) -> Result { + let bytes_len = v.len(); let mut modulus = bytes_len % 2; let mut buf = 0; let mut pos = 0; - for (index, byte) in v.bytes().enumerate().skip(2) { + for (index, byte) in v.bytes().enumerate() { buf <<= 4; match byte { @@ -126,7 +123,7 @@ fn from_hex_raw<'a>(v: &str, bytes: &mut [u8]) -> Result { }, b => { let character = char::from(b); - return Err(FromHexError::InvalidHex { character, index }) + return Err(FromHexError::InvalidHex { character, index: index + if stripped { 2 } else { 0 } }) }, } @@ -208,7 +205,7 @@ where type Value = Vec; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed hex string") + write!(formatter, "a (both 0x-prefixed or not) hex string") } fn visit_str(self, v: &str) -> Result { @@ -237,22 +234,20 @@ where type Value = usize; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a 0x-prefixed hex string with {}", self.len) + write!(formatter, "a (both 0x-prefixed or not) hex string with {}", self.len) } fn visit_str(self, v: &str) -> Result { - if !v.starts_with("0x") { - return Err(E::custom(FromHexError::MissingPrefix)) - } + let (v, stripped) = v.strip_prefix("0x").map_or((v, false), |v| (v, true)); let len = v.len(); let is_len_valid = match self.len { - ExpectedLen::Exact(ref slice) => len == 2 * slice.len() + 2, - ExpectedLen::Between(min, ref slice) => len <= 2 * slice.len() + 2 && len > 2 * min + 2, + ExpectedLen::Exact(ref slice) => len == 2 * slice.len(), + ExpectedLen::Between(min, ref slice) => len <= 2 * slice.len() && len > 2 * min, }; if !is_len_valid { - return Err(E::invalid_length(v.len() - 2, &self)) + return Err(E::invalid_length(v.len(), &self)) } let bytes = match self.len { @@ -260,7 +255,7 @@ where ExpectedLen::Between(_, slice) => slice, }; - from_hex_raw(v, bytes).map_err(E::custom) + from_hex_raw(v, bytes, stripped).map_err(E::custom) } fn 
visit_string(self, v: String) -> Result { @@ -280,7 +275,7 @@ mod tests { struct Bytes(#[serde(with = "super")] Vec); #[test] - fn should_not_fail_on_short_string() { + fn should_not_fail_on_short_string_with_prefix() { let a: Bytes = serde_json::from_str("\"0x\"").unwrap(); let b: Bytes = serde_json::from_str("\"0x1\"").unwrap(); let c: Bytes = serde_json::from_str("\"0x12\"").unwrap(); @@ -297,7 +292,7 @@ mod tests { } #[test] - fn should_not_fail_on_other_strings() { + fn should_not_fail_on_other_strings_with_prefix() { let a: Bytes = serde_json::from_str("\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); let b: Bytes = @@ -310,6 +305,37 @@ mod tests { assert_eq!(c.0.len(), 32); } + #[test] + fn should_not_fail_on_short_string_without_prefix() { + let a: Bytes = serde_json::from_str("\"\"").unwrap(); + let b: Bytes = serde_json::from_str("\"1\"").unwrap(); + let c: Bytes = serde_json::from_str("\"12\"").unwrap(); + let d: Bytes = serde_json::from_str("\"123\"").unwrap(); + let e: Bytes = serde_json::from_str("\"1234\"").unwrap(); + let f: Bytes = serde_json::from_str("\"12345\"").unwrap(); + + assert!(a.0.is_empty()); + assert_eq!(b.0, vec![1]); + assert_eq!(c.0, vec![0x12]); + assert_eq!(d.0, vec![0x1, 0x23]); + assert_eq!(e.0, vec![0x12, 0x34]); + assert_eq!(f.0, vec![0x1, 0x23, 0x45]); + } + + #[test] + fn should_not_fail_on_other_strings_without_prefix() { + let a: Bytes = + serde_json::from_str("\"7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587\"").unwrap(); + let b: Bytes = + serde_json::from_str("\"7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"").unwrap(); + let c: Bytes = + serde_json::from_str("\"7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b4\"").unwrap(); + + assert_eq!(a.0.len(), 31); + assert_eq!(b.0.len(), 32); + assert_eq!(c.0.len(), 32); + } + #[test] fn should_serialize_and_deserialize_empty_bytes() { let bytes = Bytes(Vec::new()); @@ -323,7 +349,7 @@ mod tests { } 
#[test] - fn should_encode_to_and_from_hex() { + fn should_encode_to_and_from_hex_with_prefix() { assert_eq!(to_hex(&[0, 1, 2], true), "0x102"); assert_eq!(to_hex(&[0, 1, 2], false), "0x000102"); assert_eq!(to_hex(&[0], true), "0x0"); @@ -334,4 +360,11 @@ mod tests { assert_eq!(from_hex("0x102"), Ok(vec![1, 2])); assert_eq!(from_hex("0xf"), Ok(vec![0xf])); } + + #[test] + fn should_decode_hex_without_prefix() { + assert_eq!(from_hex("0102"), Ok(vec![1, 2])); + assert_eq!(from_hex("102"), Ok(vec![1, 2])); + assert_eq!(from_hex("f"), Ok(vec![0xf])); + } } From 235a010c1e5faaa504b8b547d0ab4e33184bae0d Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 10 Nov 2021 13:50:05 +0100 Subject: [PATCH 239/359] prepare for releases: ethereum-types and ethbloom (#594) --- ethbloom/CHANGELOG.md | 3 +++ ethbloom/Cargo.toml | 2 +- ethereum-types/CHANGELOG.md | 4 ++++ ethereum-types/Cargo.toml | 2 +- 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 45781b2e4..ff59062ac 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.11.1] - 2021-09-30 +- Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) + ## [0.11.0] - 2021-01-27 ### Breaking - Updated `impl-codec` to 0.5. 
[#510](https://github.com/paritytech/parity-common/pull/510) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index dcce051e0..f096d49c2 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.11.0" +version = "0.11.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 15d97c7bf..3bcdf7106 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.12.1] - 2021-09-30 +- Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) + +## [0.12.0] - 2021-07-02 ### Breaking - Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index b11a8c1c3..3bda0e485 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.12.0" +version = "0.12.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" From eebf04b78f4144a77516e21310bf8c47410684f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Nov 2021 10:28:29 +0100 Subject: [PATCH 240/359] build(deps): update sysinfo requirement from 0.17.0 to 0.21.1 (#600) * build(deps): update sysinfo requirement from 0.17.0 to 0.21.1 Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. 
- [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Fix tests Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Keith Yeung --- kvdb-rocksdb/Cargo.toml | 2 +- kvdb-rocksdb/examples/memtest.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index fc5470c8b..9734453e9 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -31,6 +31,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.17.0" +sysinfo = "0.21.1" ctrlc = "3.1.4" chrono = "0.4" diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 51b9d4807..32097279f 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -71,7 +71,7 @@ fn proc_memory_usage() -> u64 { let memory = if let Some(self_pid) = self_pid { if sys.refresh_process(self_pid) { let proc = sys - .get_process(self_pid) + .process(self_pid) .expect("Above refresh_process succeeds, this should be Some(), qed"); proc.memory() } else { From c3ef97d403bf30fb4ff35ce9ac74b60c3a0c8f42 Mon Sep 17 00:00:00 2001 From: Dan Shields <35669742+NukeManDan@users.noreply.github.com> Date: Sun, 21 Nov 2021 04:55:07 -0700 Subject: [PATCH 241/359] move to rust 2021 (#601) * move to rust 2021 * rust-version = "1.56.1" * Update CHANGELOG for 2021 and MSRV 1.56.1 * delete fuzz changelog Co-authored-by: Andronik Ordian --- Cargo.toml | 1 - ethbloom/CHANGELOG.md | 1 + ethbloom/Cargo.toml | 3 ++- ethereum-types/CHANGELOG.md | 1 + ethereum-types/Cargo.toml | 3 ++- fixed-hash/CHANGELOG.md 
| 1 + fixed-hash/Cargo.toml | 3 ++- keccak-hash/CHANGELOG.md | 1 + keccak-hash/Cargo.toml | 3 ++- kvdb-memorydb/CHANGELOG.md | 1 + kvdb-memorydb/Cargo.toml | 3 ++- kvdb-rocksdb/CHANGELOG.md | 1 + kvdb-rocksdb/Cargo.toml | 3 ++- kvdb-shared-tests/CHANGELOG.md | 1 + kvdb-shared-tests/Cargo.toml | 3 ++- kvdb/CHANGELOG.md | 1 + kvdb/Cargo.toml | 3 ++- parity-bytes/CHANGELOG.md | 1 + parity-bytes/Cargo.toml | 3 ++- parity-util-mem/CHANGELOG.md | 3 +++ parity-util-mem/Cargo.toml | 3 ++- parity-util-mem/derive/CHANGELOG.md | 1 + parity-util-mem/derive/Cargo.toml | 2 ++ primitive-types/CHANGELOG.md | 1 + primitive-types/Cargo.toml | 3 ++- primitive-types/impls/codec/CHANGELOG.md | 1 + primitive-types/impls/codec/Cargo.toml | 3 ++- primitive-types/impls/num-traits/CHANGELOG.md | 1 + primitive-types/impls/num-traits/Cargo.toml | 3 ++- primitive-types/impls/rlp/CHANGELOG.md | 1 + primitive-types/impls/rlp/Cargo.toml | 3 ++- primitive-types/impls/serde/CHANGELOG.md | 1 + primitive-types/impls/serde/Cargo.toml | 3 ++- rlp-derive/CHANGELOG.md | 1 + rlp-derive/Cargo.toml | 3 ++- rlp/CHANGELOG.md | 1 + rlp/Cargo.toml | 3 ++- uint/CHANGELOG.md | 1 + uint/Cargo.toml | 3 ++- uint/fuzz/Cargo.toml | 3 ++- 40 files changed, 61 insertions(+), 20 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bb9fac368..fcd164017 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,4 @@ [workspace] -resolver = "2" members = [ "fixed-hash", "keccak-hash", diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index ff59062ac..0466d6c53 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.11.1] - 2021-09-30 - Combined `scale-info` feature into `codec`. 
[#593](https://github.com/paritytech/parity-common/pull/593) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index f096d49c2..65f1af299 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -7,7 +7,8 @@ license = "MIT OR Apache-2.0" documentation = "https://docs.rs/ethbloom" homepage = "https://github.com/paritytech/parity-common" repository = "https://github.com/paritytech/parity-common" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 3bcdf7106..7c9fbcfc1 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.12.1] - 2021-09-30 - Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 3bda0e485..06869767c 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Ethereum types" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] ethbloom = { path = "../ethbloom", version = "0.11", default-features = false } diff --git a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index b74c4b3e4..8fe709b9f 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.7.0] - 2021-01-05 ### Breaking diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 99419c582..27ad7d217 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -8,7 +8,8 @@ repository = "https://github.com/paritytech/parity-common" description = "Macros to define custom fixed-size hash types" documentation = "https://docs.rs/fixed-hash/" readme = "README.md" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [package.metadata.docs.rs] features = ["quickcheck", "api-dummy"] diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 455ca717b..15eb38aa5 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ### Breaking - Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index d48ef763e..9979285b2 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -6,7 +6,8 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" readme = "README.md" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index e9f497217..399ca6cd8 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ### Breaking - Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index c60c48432..29d4cfa6f 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std"] } diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 195922bea..11ea4cb84 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.14.0] - 2021-08-05 ### Breaking diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 9734453e9..45c2eab7c 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [[bench]] name = "bench_read_perf" diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md index 17f7b2501..197bfd68f 100644 --- a/kvdb-shared-tests/CHANGELOG.md +++ b/kvdb-shared-tests/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ### Breaking - Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 356b44766..e63c43370 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -2,7 +2,8 @@ name = "kvdb-shared-tests" version = "0.8.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" +rust-version = "1.56.1" description = "Shared tests for kvdb functionality, to be executed against actual implementations" license = "MIT OR Apache-2.0" diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 15b66dee7..6ab16be2e 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog]. ### Breaking - Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.9.0] - 2021-01-27 ### Breaking diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 66d8a18b6..348761b4f 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] smallvec = "1.0.0" diff --git a/parity-bytes/CHANGELOG.md b/parity-bytes/CHANGELOG.md index 49b2d0857..a3c092d58 100644 --- a/parity-bytes/CHANGELOG.md +++ b/parity-bytes/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.1.2] - 2020-03-16 - License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) diff --git a/parity-bytes/Cargo.toml b/parity-bytes/Cargo.toml index 651369f1c..6d745aca3 100644 --- a/parity-bytes/Cargo.toml +++ b/parity-bytes/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "byte utilities for Parity" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 3de621c77..de095aad0 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) + ## [0.10.2] - 2021-09-20 - Switched from `jemallocator` to `tikv-jemallocator`. [#589](https://github.com/paritytech/parity-common/pull/589) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 6515cf8fb..af9b7e840 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" # Prevent multiple versions from being linked into the same program. links = "parity-util-mem-ban-duplicates" diff --git a/parity-util-mem/derive/CHANGELOG.md b/parity-util-mem/derive/CHANGELOG.md index 545cf7dff..c9a41d07a 100644 --- a/parity-util-mem/derive/CHANGELOG.md +++ b/parity-util-mem/derive/CHANGELOG.md @@ -5,3 +5,4 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/parity-util-mem/derive/Cargo.toml b/parity-util-mem/derive/Cargo.toml index d41ba12f5..02d6a9d71 100644 --- a/parity-util-mem/derive/Cargo.toml +++ b/parity-util-mem/derive/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" description = "Crate for memory reporting" repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" +edition = "2021" +rust-version = "1.56.1" [lib] path = "lib.rs" diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 39c1a4e52..a7c1da6b6 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.10.1] - 2021-07-02 ### Added diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 7a0b9d9e5..de3732646 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md index c7fca7d25..63cf2acca 100644 --- a/primitive-types/impls/codec/CHANGELOG.md +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.5.1] - 2021-07-02 ### Dependencies diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 8721e4333..0e83d4c21 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Parity Codec serialization support for uint and fixed hash." -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] parity-scale-codec = { version = "2.2.0", default-features = false, features = ["max-encoded-len"] } diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md index e0b657b7d..0cec2af8f 100644 --- a/primitive-types/impls/num-traits/CHANGELOG.md +++ b/primitive-types/impls/num-traits/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.1.1] - 2021-06-30 - Added `integer-sqrt` trait support. [#554](https://github.com/paritytech/parity-common/pull/554) diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index 27a8fc82a..bbcd3f02f 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "num-traits implementation for uint." 
-edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] num-traits = { version = "0.2", default-features = false } diff --git a/primitive-types/impls/rlp/CHANGELOG.md b/primitive-types/impls/rlp/CHANGELOG.md index 749b49f37..a7a98544b 100644 --- a/primitive-types/impls/rlp/CHANGELOG.md +++ b/primitive-types/impls/rlp/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.3.0] - 2021-01-05 ### Breaking diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index 622aa0f37..e85d68e43 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "RLP serialization support for uint and fixed hash." -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] rlp = { version = "0.5", path = "../../../rlp", default-features = false } diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index 390cc1581..5d143f916 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.3.2] - 2021-11-10 - Supported decoding of hex strings without `0x` prefix. 
[#598](https://github.com/paritytech/parity-common/pull/598) diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 28c1ae9d5..f94c754ac 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -2,10 +2,11 @@ name = "impl-serde" version = "0.3.2" authors = ["Parity Technologies "] -edition = "2018" license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Serde serialization support for uint and fixed hash." +edition = "2021" +rust-version = "1.56.1" [features] default = ["std"] diff --git a/rlp-derive/CHANGELOG.md b/rlp-derive/CHANGELOG.md index d7b344b76..85516d84b 100644 --- a/rlp-derive/CHANGELOG.md +++ b/rlp-derive/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.1.0] - 2020-02-13 - Extracted from parity-ethereum repo. [#343](https://github.com/paritytech/parity-common/pull/343) diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index 16f7e010e..a1785d7b7 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" description = "Derive macro for #[derive(RlpEncodable, RlpDecodable)]" homepage = "http://parity.io" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [lib] proc-macro = true diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index cf4695203..dedfeef65 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.5.1] - 2021-07-30 - Fix rlp encoding/decoding for bool. [#572](https://github.com/paritytech/parity-common/pull/572) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 6f346b5ea..f649a7eed 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -5,7 +5,8 @@ description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] bytes = { version = "1", default-features = false } diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 73456b577..a5d14c05b 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.9.1] - 2021-06-30 - Added `integer_sqrt` method. 
[#554](https://github.com/paritytech/parity-common/pull/554) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 1db8710be..5d6a52f21 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -7,7 +7,8 @@ name = "uint" version = "0.9.1" authors = ["Parity Technologies "] readme = "README.md" -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [dependencies] byteorder = { version = "1.4.2", default-features = false } diff --git a/uint/fuzz/Cargo.toml b/uint/fuzz/Cargo.toml index b549817a5..78119034f 100644 --- a/uint/fuzz/Cargo.toml +++ b/uint/fuzz/Cargo.toml @@ -4,7 +4,8 @@ description = "Fuzzers for uint algorithms" publish = false version = "0.1.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" +rust-version = "1.56.1" [package.metadata] cargo-fuzz = true From 5fe7924049f39fc6cf772605efc5c28b3ecbae78 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Sat, 27 Nov 2021 08:16:36 -0800 Subject: [PATCH 242/359] Stop calling `state.finish()` for fixed hashes (#602) Calling [`state.finish();`][Hasher::finish] is a no-op. At call site, `state` is read-only and the return value is ignored. The call and may be removed. [Hasher::finish]: https://doc.rust-lang.org/std/hash/trait.Hasher.html#tymethod.finish --- fixed-hash/src/hash.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 3d0ca6990..232245868 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -274,7 +274,6 @@ macro_rules! 
construct_fixed_hash { impl $crate::core_::hash::Hash for $name { fn hash(&self, state: &mut H) where H: $crate::core_::hash::Hasher { state.write(&self.0); - state.finish(); } } From d8c63201624d39525198ce71fc550dd09a267271 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Dec 2021 10:51:47 +0100 Subject: [PATCH 243/359] build(deps): update sysinfo requirement from 0.21.1 to 0.22.0 (#609) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. - [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 45c2eab7c..1c1103700 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -32,6 +32,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.21.1" +sysinfo = "0.22.0" ctrlc = "3.1.4" chrono = "0.4" From 71fbb390a4af9af000d5ab665558116482a267eb Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Mon, 17 Jan 2022 16:47:34 +0800 Subject: [PATCH 244/359] feat(rlp): add optional derive feature (#613) * feat(rlp): add optional derive feature Signed-off-by: koushiro * Update changelog Signed-off-by: koushiro --- rlp/CHANGELOG.md | 1 + rlp/Cargo.toml | 2 ++ rlp/src/lib.rs | 3 +++ 3 files changed, 6 insertions(+) diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index dedfeef65..072ace3f6 100644 --- a/rlp/CHANGELOG.md +++ 
b/rlp/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Add optional `derive` feature. [#613](https://github.com/paritytech/parity-common/pull/613) ## [0.5.1] - 2021-07-30 - Fix rlp encoding/decoding for bool. [#572](https://github.com/paritytech/parity-common/pull/572) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index f649a7eed..c0614ef88 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -11,6 +11,7 @@ rust-version = "1.56.1" [dependencies] bytes = { version = "1", default-features = false } rustc-hex = { version = "2.0.1", default-features = false } +rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] criterion = "0.3.0" @@ -20,6 +21,7 @@ primitive-types = { path = "../primitive-types", version = "0.10", features = [" [features] default = ["std"] std = ["bytes/std", "rustc-hex/std"] +derive = ["rlp-derive"] [[bench]] name = "rlp" diff --git a/rlp/src/lib.rs b/rlp/src/lib.rs index f8ed6a9de..a0bd64ae9 100644 --- a/rlp/src/lib.rs +++ b/rlp/src/lib.rs @@ -48,6 +48,9 @@ use alloc::vec::Vec; use bytes::BytesMut; use core::borrow::Borrow; +#[cfg(feature = "derive")] +pub use rlp_derive::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; + pub use self::{ error::DecoderError, rlpin::{PayloadInfo, Prototype, Rlp, RlpIterator}, From a855f4df35664f29232e4c27866de637e56e9be5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 05:11:30 -0800 Subject: [PATCH 245/359] build(deps): update sysinfo requirement from 0.22.0 to 0.23.0 (#614) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. 
- [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 1c1103700..2a7b8d3b4 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -32,6 +32,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.22.0" +sysinfo = "0.23.0" ctrlc = "3.1.4" chrono = "0.4" From 8f5bf54ec20660b132a94a6adb8f1aa54045286e Mon Sep 17 00:00:00 2001 From: dalance Date: Fri, 28 Jan 2022 01:43:26 +0900 Subject: [PATCH 246/359] Display formatting support (#584) (#603) * Display formatting support (#584) * Update uint/src/uint.rs Co-authored-by: Niklas Adolfsson Co-authored-by: Andronik Co-authored-by: Niklas Adolfsson --- uint/src/uint.rs | 81 ++++++++++++++++++++++++---------------- uint/tests/uint_tests.rs | 20 ++++++++-- 2 files changed, 65 insertions(+), 36 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index a9d6df3ee..0cc013bf3 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -200,10 +200,7 @@ macro_rules! impl_try_from_for_primitive { fn try_from(u: $from) -> $crate::core_::result::Result<$to, &'static str> { let $from(arr) = u; if !u.fits_word() || arr[0] > <$to>::max_value() as u64 { - Err(concat!( - "integer overflow when casting to ", - stringify!($to) - )) + Err(concat!("integer overflow when casting to ", stringify!($to))) } else { Ok(arr[0] as $to) } @@ -223,9 +220,7 @@ macro_rules! 
uint_overflowing_binop { let mut ret = [0u64; $n_words]; let ret_ptr = &mut ret as *mut [u64; $n_words] as *mut u64; let mut carry = 0u64; - $crate::static_assertions::const_assert!( - core::isize::MAX as usize / core::mem::size_of::() > $n_words - ); + $crate::static_assertions::const_assert!(core::isize::MAX as usize / core::mem::size_of::() > $n_words); // `unroll!` is recursive, but doesn’t use `$crate::unroll`, so we need to ensure that it // is in scope unqualified. @@ -321,8 +316,7 @@ macro_rules! uint_full_mul_reg { #[doc(hidden)] macro_rules! uint_overflowing_mul { ($name:ident, $n_words: tt, $self_expr: expr, $other: expr) => {{ - let ret: [u64; $n_words * 2] = - $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other); + let ret: [u64; $n_words * 2] = $crate::uint_full_mul_reg!($name, $n_words, $self_expr, $other); // The safety of this is enforced by the compiler let ret: [[u64; $n_words]; 2] = unsafe { $crate::core_::mem::transmute(ret) }; @@ -1307,6 +1301,44 @@ macro_rules! construct_uint { $name(ret) } + + fn fmt_hex(&self, f: &mut $crate::core_::fmt::Formatter, is_lower: bool) -> $crate::core_::fmt::Result { + let &$name(ref data) = self; + // special case. 
+ if self.is_zero() { + return f.pad_integral(true, "0x", "0"); + } + + let mut latch = false; + let mut buf = [0_u8; $n_words * 16]; + let mut i = 0; + for ch in data.iter().rev() { + for x in 0..16 { + // nibble < 16 + let nibble = (ch & (15u64 << ((15 - x) * 4) as u64)) >> (((15 - x) * 4) as u64); + if !latch { + latch = nibble != 0; + } + + if latch { + // nibble is `'0'..'9' 'a'..'f' 'A'..'F'` because nibble < 16 + let nibble = match nibble { + 0..=9 => nibble as u8 + b'0', + _ if is_lower => nibble as u8 - 10 + b'a', + _ => nibble as u8 - 10 + b'A', + }; + buf[i] = nibble; + i += 1; + } + } + } + + // sequence of `'0'..'9' 'a'..'f' 'A'..'F'` chars is guaranteed to be a valid UTF8 string + let s = unsafe { + $crate::core_::str::from_utf8_unchecked(&buf[0..i]) + }; + f.pad_integral(true, "0x", s) + } } impl $crate::core_::convert::From<$name> for [u8; $n_words * 8] { @@ -1673,35 +1705,19 @@ macro_rules! construct_uint { let s = unsafe { $crate::core_::str::from_utf8_unchecked(&buf[i..]) }; - f.write_str(s) + f.pad_integral(true, "", s) } } impl $crate::core_::fmt::LowerHex for $name { fn fmt(&self, f: &mut $crate::core_::fmt::Formatter) -> $crate::core_::fmt::Result { - let &$name(ref data) = self; - if f.alternate() { - $crate::core_::write!(f, "0x")?; - } - // special case. - if self.is_zero() { - return $crate::core_::write!(f, "0"); - } - - let mut latch = false; - for ch in data.iter().rev() { - for x in 0..16 { - let nibble = (ch & (15u64 << ((15 - x) * 4) as u64)) >> (((15 - x) * 4) as u64); - if !latch { - latch = nibble != 0; - } + self.fmt_hex(f, true) + } + } - if latch { - $crate::core_::write!(f, "{:x}", nibble)?; - } - } - } - Ok(()) + impl $crate::core_::fmt::UpperHex for $name { + fn fmt(&self, f: &mut $crate::core_::fmt::Formatter) -> $crate::core_::fmt::Result { + self.fmt_hex(f, false) } } @@ -1795,7 +1811,6 @@ macro_rules! 
impl_quickcheck_arbitrary_for_uint { ($uint: ty, $n_bytes: tt) => {}; } - #[cfg(feature = "arbitrary")] #[macro_export] #[doc(hidden)] diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 7c9159658..566d30abf 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -531,13 +531,27 @@ fn uint256_from_dec_str() { #[test] fn display_uint() { - let s = "12345678987654321023456789"; - assert_eq!(format!("{}", U256::from_dec_str(s).unwrap()), s); + let s = U256::from_dec_str("12345678987654321023456789").unwrap(); + assert_eq!(format!("{}", s), "12345678987654321023456789"); + assert_eq!(format!("{:x}", s), "a364c995584f929f39615"); + assert_eq!(format!("{:X}", s), "A364C995584F929F39615"); + assert_eq!(format!("{:032}", s), "00000012345678987654321023456789"); + assert_eq!(format!("{:032x}", s), "00000000000a364c995584f929f39615"); + assert_eq!(format!("{:032X}", s), "00000000000A364C995584F929F39615"); + assert_eq!(format!("{:#032x}", s), "0x000000000a364c995584f929f39615"); + assert_eq!(format!("{:#032X}", s), "0x000000000A364C995584F929F39615"); } #[test] fn display_uint_zero() { - assert_eq!(format!("{}", U256::from(0)), "0"); + let s = U256::from(0); + assert_eq!(format!("{}", s), "0"); + assert_eq!(format!("{:x}", s), "0"); + assert_eq!(format!("{:X}", s), "0"); + assert_eq!(format!("{:032x}", s), "00000000000000000000000000000000"); + assert_eq!(format!("{:032X}", s), "00000000000000000000000000000000"); + assert_eq!(format!("{:#032x}", s), "0x000000000000000000000000000000"); + assert_eq!(format!("{:#032X}", s), "0x000000000000000000000000000000"); } #[test] From aa0e8380d9d252945e3f4b1c3cea6d179bba6181 Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 4 Feb 2022 09:42:19 +0100 Subject: [PATCH 247/359] uint: release 0.9.2 (#617) --- uint/CHANGELOG.md | 3 +++ uint/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index a5d14c05b..ad717f1a0 100644 --- 
a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,7 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.9.2] - 2022-01-28 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Display formatting support. [#603](ttps://github.com/paritytech/parity-common/pull/603) ## [0.9.1] - 2021-06-30 - Added `integer_sqrt` method. [#554](https://github.com/paritytech/parity-common/pull/554) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 5d6a52f21..b05270767 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.9.1" +version = "0.9.2" authors = ["Parity Technologies "] readme = "README.md" edition = "2021" From 502533e6b200a1fbafd250558f85603af747d4dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Feb 2022 10:01:35 +0100 Subject: [PATCH 248/359] build(deps): update lru requirement from 0.6 to 0.7 (#595) Updates the requirements on [lru](https://github.com/jeromefroe/lru-rs) to permit the latest version. - [Release notes](https://github.com/jeromefroe/lru-rs/releases) - [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/jeromefroe/lru-rs/compare/0.6.0...0.7.0) --- updated-dependencies: - dependency-name: lru dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index af9b7e840..814947f88 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -19,7 +19,7 @@ build = "build.rs" cfg-if = "1.0.0" dlmalloc = { version = "0.2.1", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } -lru = { version = "0.6", optional = true } +lru = { version = "0.7", optional = true } hashbrown = { version = "0.11", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } From 73bfa40cd95e1b352ea604e204ef2f0a2f3948fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Feb 2022 10:02:30 +0100 Subject: [PATCH 249/359] build(deps): update parking_lot requirement from 0.11.1 to 0.12.0 (#619) Updates the requirements on [parking_lot](https://github.com/Amanieu/parking_lot) to permit the latest version. - [Release notes](https://github.com/Amanieu/parking_lot/releases) - [Changelog](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md) - [Commits](https://github.com/Amanieu/parking_lot/compare/0.11.1...0.12.0) --- updated-dependencies: - dependency-name: parking_lot dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-memorydb/Cargo.toml | 2 +- kvdb-rocksdb/Cargo.toml | 2 +- parity-util-mem/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 29d4cfa6f..8e7e13c10 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -10,7 +10,7 @@ rust-version = "1.56.1" [dependencies] parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std"] } -parking_lot = "0.11.1" +parking_lot = "0.12.0" kvdb = { version = "0.10", path = "../kvdb" } [dev-dependencies] diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 2a7b8d3b4..94da7aab9 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -18,7 +18,7 @@ fs-swap = "0.2.6" kvdb = { path = "../kvdb", version = "0.10" } log = "0.4.8" num_cpus = "1.10.1" -parking_lot = "0.11.1" +parking_lot = "0.12.0" regex = "1.3.1" rocksdb = { version = "0.17", features = ["snappy"], default-features = false } owning_ref = "0.4.0" diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 814947f88..86fa99da2 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -28,7 +28,7 @@ impl-trait-for-tuples = "0.2.0" smallvec = { version = "1.0.0", optional = true } ethereum-types = { version = "0.12.0", optional = true, path = "../ethereum-types" } -parking_lot = { version = "0.11.1", optional = true } +parking_lot = { version = "0.12.0", optional = true } primitive-types = { version = "0.10", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] From b88b13b411252504b07c5f213d54d8a6da625145 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Fri, 4 Feb 2022 12:03:03 +0300 Subject: [PATCH 250/359] Bump parity-scale-codec to 3.0.0 (#622) --- 
primitive-types/impls/codec/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 0e83d4c21..3e9443ffc 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -parity-scale-codec = { version = "2.2.0", default-features = false, features = ["max-encoded-len"] } +parity-scale-codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"] } [features] default = ["std"] From 88cc5928e991c79545f9685f220f173a2fbffbd0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Feb 2022 10:03:39 +0100 Subject: [PATCH 251/359] build(deps): update hashbrown requirement from 0.11 to 0.12 (#612) Updates the requirements on [hashbrown](https://github.com/rust-lang/hashbrown) to permit the latest version. - [Release notes](https://github.com/rust-lang/hashbrown/releases) - [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/hashbrown/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: hashbrown dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 86fa99da2..cfb070d0f 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -20,7 +20,7 @@ cfg-if = "1.0.0" dlmalloc = { version = "0.2.1", features = ["global"], optional = true } wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.7", optional = true } -hashbrown = { version = "0.11", optional = true } +hashbrown = { version = "0.12", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } parity-util-mem-derive = { path = "derive", version = "0.1" } From 2d7f7e24d7daa3bca341ce6c8ad81d48846035ad Mon Sep 17 00:00:00 2001 From: Frost Red Lee Date: Fri, 4 Feb 2022 17:19:23 +0800 Subject: [PATCH 252/359] uint: div_mod_word use intrinsic u128 division(#406) (#478) --- uint/src/uint.rs | 40 +++------------------------------------- 1 file changed, 3 insertions(+), 37 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 0cc013bf3..1dc9cde6c 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1172,43 +1172,9 @@ macro_rules! 
construct_uint { #[inline(always)] fn div_mod_word(hi: u64, lo: u64, y: u64) -> (u64, u64) { debug_assert!(hi < y); - // NOTE: this is slow (__udivti3) - // let x = (u128::from(hi) << 64) + u128::from(lo); - // let d = u128::from(d); - // ((x / d) as u64, (x % d) as u64) - // TODO: look at https://gmplib.org/~tege/division-paper.pdf - const TWO32: u64 = 1 << 32; - let s = y.leading_zeros(); - let y = y << s; - let (yn1, yn0) = Self::split(y); - let un32 = (hi << s) | lo.checked_shr(64 - s).unwrap_or(0); - let un10 = lo << s; - let (un1, un0) = Self::split(un10); - let mut q1 = un32 / yn1; - let mut rhat = un32 - q1 * yn1; - - while q1 >= TWO32 || q1 * yn0 > TWO32 * rhat + un1 { - q1 -= 1; - rhat += yn1; - if rhat >= TWO32 { - break; - } - } - - let un21 = un32.wrapping_mul(TWO32).wrapping_add(un1).wrapping_sub(q1.wrapping_mul(y)); - let mut q0 = un21 / yn1; - rhat = un21.wrapping_sub(q0.wrapping_mul(yn1)); - - while q0 >= TWO32 || q0 * yn0 > TWO32 * rhat + un0 { - q0 -= 1; - rhat += yn1; - if rhat >= TWO32 { - break; - } - } - - let rem = un21.wrapping_mul(TWO32).wrapping_add(un0).wrapping_sub(y.wrapping_mul(q0)); - (q1 * TWO32 + q0, rem >> s) + let x = (u128::from(hi) << 64) + u128::from(lo); + let y = u128::from(y); + ((x / y) as u64, (x % y) as u64) } #[inline(always)] From b37d0b312d39fa47c61c4430b30ca87d90e45a08 Mon Sep 17 00:00:00 2001 From: Lennart Braun Date: Fri, 4 Feb 2022 10:39:26 +0100 Subject: [PATCH 253/359] uint: Fix overflowing_neg by implementing two's complement (#611) * uint: Fix overflowing_neg with two's complement The operation `overflowing_neg` on the primitive integer types in the Rust standard library computes the negation of the integer value using two's complement, i.e., it returns `!self + 1`. The previous implementation of the uint library implemented `overflowing_neg` using `!self` for non-zero values which is bit-wise negation (NOT). 
This lead to behavior where 0 - 1 != -1 for U256 with the `overflowing_neg` and `overflow_sub` operations. This patch adapts the `uint_overflowing_binop` macro to implement the two's complement correctly: Starting from the least significant word we apply `u64::overflowing_neg` until we have seen the first one-bit in the original integer, i.e., until `overflowing_neg` reports an overflow. Then we use bit-wise NOT for the remaining words. * Update uint/src/uint.rs * Update uint/src/uint.rs Co-authored-by: Andronik --- uint/src/uint.rs | 2 +- uint/tests/uint_tests.rs | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 1dc9cde6c..647a803d7 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1157,7 +1157,7 @@ macro_rules! construct_uint { if self.is_zero() { (self, false) } else { - (!self, true) + (!self + 1, true) } } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 566d30abf..95e5ab2da 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -469,6 +469,39 @@ fn uint256_sub_overflow() { ); } +#[test] +fn uint256_neg_overflow() { + assert_eq!(U256::from_str("0").unwrap().overflowing_neg(), (U256::from_str("0").unwrap(), false)); + assert_eq!( + U256::from_str("1").unwrap().overflowing_neg(), + (U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(), true) + ); + assert_eq!( + U256::from_str("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + .unwrap() + .overflowing_neg(), + (U256::from_str("1").unwrap(), true) + ); + assert_eq!( + U256::from_str("8000000000000000000000000000000000000000000000000000000000000000") + .unwrap() + .overflowing_neg(), + (U256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap(), true) + ); + assert_eq!( + U256::from_str("ffffffffffffffff0000000000000000ffffffffffffffff0000000000000000") + .unwrap() + .overflowing_neg(), + 
(U256::from_str("0000000000000000ffffffffffffffff00000000000000010000000000000000").unwrap(), true) + ); + assert_eq!( + U256::from_str("0000000000000000ffffffffffffffff0000000000000000ffffffffffffffff") + .unwrap() + .overflowing_neg(), + (U256::from_str("ffffffffffffffff0000000000000000ffffffffffffffff0000000000000001").unwrap(), true) + ); +} + #[test] #[should_panic] #[allow(unused_must_use)] From 1d3a3d5138403bbf07d42978b29d8ac7d3fff53b Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 4 Feb 2022 11:59:35 +0100 Subject: [PATCH 254/359] New releases preparation (#623) * minor bump uint * major bump parity-util-mem * major bump kvdb * major bump kvdb-rocksdb * major bump kvdb-memorydb * who cares about tests * major bump impl-codec * major bump primitive-types * major bump keccak-hash * major bump parity-util-mem (again) * major bump ethereum-types * major bump parity-util-mem (again II) * major bump ethbloom * major bump ethereum-types (again) * changelog typo fix --- ethbloom/CHANGELOG.md | 4 ++++ ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 6 ++++++ ethereum-types/Cargo.toml | 8 ++++---- keccak-hash/CHANGELOG.md | 5 +++++ keccak-hash/Cargo.toml | 4 ++-- kvdb-memorydb/CHANGELOG.md | 5 +++++ kvdb-memorydb/Cargo.toml | 8 ++++---- kvdb-rocksdb/CHANGELOG.md | 4 +++- kvdb-rocksdb/Cargo.toml | 8 ++++---- kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb/CHANGELOG.md | 7 ++++++- kvdb/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 8 ++++++++ parity-util-mem/Cargo.toml | 6 +++--- primitive-types/CHANGELOG.md | 4 ++++ primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/codec/CHANGELOG.md | 4 ++++ primitive-types/impls/codec/Cargo.toml | 2 +- rlp/Cargo.toml | 2 +- uint/CHANGELOG.md | 4 ++++ uint/Cargo.toml | 2 +- 22 files changed, 77 insertions(+), 30 deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 0466d6c53..34191bfef 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -5,7 +5,11 @@ The format is based on 
[Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.12.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `impl-codec` to 0.6. [#623](https://github.com/paritytech/parity-common/pull/623) ## [0.11.1] - 2021-09-30 - Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 65f1af299..9c00302fc 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.11.1" +version = "0.12.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -16,7 +16,7 @@ crunchy = { version = "0.2.2", default-features = false, features = ["limit_256" fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } -impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } [dev-dependencies] diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 7c9fbcfc1..d84500e40 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,7 +5,13 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.13.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `impl-codec` to 0.6. [#623](https://github.com/paritytech/parity-common/pull/623) +- Updated `primitive-types` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) +- Updated `ethbloom` to 0.12. [#623](https://github.com/paritytech/parity-common/pull/623) ## [0.12.1] - 2021-09-30 - Combined `scale-info` feature into `codec`. [#593](https://github.com/paritytech/parity-common/pull/593) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 06869767c..251bfc11c 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.12.1" +version = "0.13.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,13 +9,13 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.11", default-features = false } +ethbloom = { path = "../ethbloom", version = "0.12", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.10", features = ["byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.11", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.2", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } -impl-codec = { version = "0.5.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.6.0", path = 
"../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } [dev-dependencies] diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 15eb38aa5..7e9373f67 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -5,8 +5,13 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.9.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `primitive-types` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) +## [0.8.0] - 2021-07-02 ### Breaking - Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 9979285b2..8229dc88b 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.8.0" +version = "0.9.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -11,7 +11,7 @@ rust-version = "1.56.1" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.10", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.11", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index 399ca6cd8..e6dc4f85c 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,8 +5,13 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.11.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `kvdb` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) +## [0.10.0] - 2021-07-02 ### Breaking - Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) - Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 8e7e13c10..6f7767956 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.10.0" +version = "0.11.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -9,12 +9,12 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std"] } parking_lot = "0.12.0" -kvdb = { version = "0.10", path = "../kvdb" } +kvdb = { version = "0.11", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } [features] default = [] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 11ea4cb84..524c279ba 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -4,8 +4,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.15.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) +- Bumped `kvdb` and `parity-util-mem`. [#623](https://github.com/paritytech/parity-common/pull/623) ## [0.14.0] - 2021-08-05 ### Breaking diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 94da7aab9..fe87709ae 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.14.0" +version = "0.15.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -15,20 +15,20 @@ harness = false [dependencies] smallvec = "1.0.0" fs-swap = "0.2.6" -kvdb = { path = "../kvdb", version = "0.10" } +kvdb = { path = "../kvdb", version = "0.11" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" rocksdb = { version = "0.17", features = ["snappy"], default-features = false } owning_ref = "0.4.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std", "smallvec"] } [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.8" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index e63c43370..1ab98c5ef 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-shared-tests" -version = "0.8.0" +version = "0.9.0" authors = ["Parity Technologies "] edition = "2021" rust-version = "1.56.1" @@ -8,4 +8,4 @@ description = "Shared tests for kvdb functionality, to be executed against actua license = "MIT OR 
Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.10" } +kvdb = { path = "../kvdb", version = "0.11" } diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 6ab16be2e..5c5093b14 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,9 +6,14 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.11.0] - 2022-02-04 ### Breaking -- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `parity-util-mem` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) + +## [0.10.0] - 2021-07-02 +### Breaking +- Updated `parity-util-mem` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) ## [0.9.0] - 2021-01-27 ### Breaking diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 348761b4f..4a805e026 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.10.0" +version = "0.11.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -10,4 +10,4 @@ rust-version = "1.56.1" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.10", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index de095aad0..4f0d1fa87 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -5,7 +5,15 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.11.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `primitive-types` to 0.11. 
[#623](https://github.com/paritytech/parity-common/pull/623) +- Updated `ethereum-types` to 0.13. [#623](https://github.com/paritytech/parity-common/pull/623) +- Updated `lru` to 0.7. [#595](https://github.com/paritytech/parity-common/pull/595) +- Updated `parking_lot` to 0.12. [#619](https://github.com/paritytech/parity-common/pull/619) +- Updated `hashbrown` to 0.12. [#612](https://github.com/paritytech/parity-common/pull/612) ## [0.10.2] - 2021-09-20 - Switched from `jemallocator` to `tikv-jemallocator`. [#589](https://github.com/paritytech/parity-common/pull/589) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index cfb070d0f..a8d73cc32 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.10.2" +version = "0.11.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -27,9 +27,9 @@ parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.2.0" smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.12.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.13.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.12.0", optional = true } -primitive-types = { version = "0.10", path = "../primitive-types", default-features = false, optional = true } +primitive-types = { version = "0.11", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index a7c1da6b6..16c8924a2 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,7 +5,11 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.11.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `impl-codec` to 0.6. [#623](https://github.com/paritytech/parity-common/pull/623) ## [0.10.1] - 2021-07-02 ### Added diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index de3732646..3b9332c46 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.10.1" +version = "0.11.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -12,7 +12,7 @@ rust-version = "1.56.1" fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } uint = { version = "0.9.0", path = "../uint", default-features = false } impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } -impl-codec = { version = "0.5.1", path = "impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } scale-info-crate = { package = "scale-info", version = ">=0.9, <2", features = ["derive"], default-features = false, optional = true } diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md index 63cf2acca..713c28aa7 100644 --- a/primitive-types/impls/codec/CHANGELOG.md +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -5,7 +5,11 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.6.0] - 2022-02-04 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `parity-scale-codec` to 3.0. [#622](https://github.com/paritytech/parity-common/pull/622) ## [0.5.1] - 2021-07-02 ### Dependencies diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 3e9443ffc..50c5d2180 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-codec" -version = "0.5.1" +version = "0.6.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index c0614ef88..83022e5af 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -16,7 +16,7 @@ rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] criterion = "0.3.0" hex-literal = "0.3.1" -primitive-types = { path = "../primitive-types", version = "0.10", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.11", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index ad717f1a0..cc7e75e31 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.9.3] - 2022-02-04 +- Simplified and faster `div_mod`. [#478](https://github.com/paritytech/parity-common/pull/478) +- Fixed `overflowing_neg`. [#611](https://github.com/paritytech/parity-common/pull/611) + ## [0.9.2] - 2022-01-28 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) - Display formatting support. 
[#603](ttps://github.com/paritytech/parity-common/pull/603) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index b05270767..1de346d84 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.9.2" +version = "0.9.3" authors = ["Parity Technologies "] readme = "README.md" edition = "2021" From f2bbf5190446cef0393866f70020abd884465d4e Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Mon, 7 Feb 2022 12:52:28 +0300 Subject: [PATCH 255/359] Remove duplicate H128 impl from ethereum-types (#624) --- ethereum-types/src/hash.rs | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 4e4fd3b47..82372ee64 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -41,18 +41,7 @@ impl_fixed_hash_serde!(H64, 8); #[cfg(feature = "codec")] impl_fixed_hash_codec!(H64, 8); -construct_fixed_hash! { - #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] - pub struct H128(16); -} -#[cfg(feature = "rlp")] -impl_fixed_hash_rlp!(H128, 16); -#[cfg(feature = "serialize")] -impl_fixed_hash_serde!(H128, 16); -#[cfg(feature = "codec")] -impl_fixed_hash_codec!(H128, 16); - -pub use primitive_types::{H160, H256}; +pub use primitive_types::{H128, H160, H256}; construct_fixed_hash! 
{ #[cfg_attr(feature = "codec", derive(scale_info::TypeInfo))] From bcb2e48a7f0e1594b97fdd9086973f046d6bdd5d Mon Sep 17 00:00:00 2001 From: Andronik Date: Mon, 7 Feb 2022 16:38:53 +0100 Subject: [PATCH 256/359] YOLO scale-info update (#627) --- ethbloom/CHANGELOG.md | 3 +++ ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 3 +++ ethereum-types/Cargo.toml | 4 ++-- primitive-types/CHANGELOG.md | 3 +++ primitive-types/Cargo.toml | 4 ++-- 6 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 34191bfef..7f46176fd 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.12.1] - 2022-02-07 +- Updated `scale-info` to ">=1.0, <3". [#627](https://github.com/paritytech/parity-common/pull/627) + ## [0.12.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 9c00302fc..95a84eebb 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.12.0" +version = "0.12.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -17,7 +17,7 @@ fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } -scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } +scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } 
[dev-dependencies] criterion = "0.3.0" diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index d84500e40..7e1829d81 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.13.1] - 2022-02-07 +- Updated `scale-info` to ">=1.0, <3". [#627](https://github.com/paritytech/parity-common/pull/627) + ## [0.13.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 251bfc11c..3449beccd 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.13.0" +version = "0.13.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -16,7 +16,7 @@ primitive-types = { path = "../primitive-types", version = "0.11", features = [" impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.2", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } -scale-info = { version = "1.0", features = ["derive"], default-features = false, optional = true } +scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] serde_json = "1.0.41" diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 16c8924a2..e02524e30 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.11.1] - 2022-02-07 +- Updated `scale-info` to ">=0.9, <3". 
[#627](https://github.com/paritytech/parity-common/pull/627) + ## [0.11.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 3b9332c46..e09b21215 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.11.0" +version = "0.11.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -15,7 +15,7 @@ impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } -scale-info-crate = { package = "scale-info", version = ">=0.9, <2", features = ["derive"], default-features = false, optional = true } +scale-info-crate = { package = "scale-info", version = ">=0.9, <3", features = ["derive"], default-features = false, optional = true } [features] default = ["std"] From 70a745a701f1917dd9c54a009c46d00f6fcb1dee Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 18 Feb 2022 17:34:51 +0100 Subject: [PATCH 257/359] update rocksdb to 0.18 (#629) * update rocksdb to 0.18 * prepare for release * enable jemalloc feature * update the changelog --- kvdb-rocksdb/CHANGELOG.md | 5 +++++ kvdb-rocksdb/Cargo.toml | 4 ++-- kvdb-rocksdb/src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 524c279ba..01cf499a0 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -4,6 +4,11 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [Unreleased] + +## [0.15.1] - 2022-02-18 +- Updated `rocksdb` to 0.18 and enable `jemalloc` feature. [#629](https://github.com/paritytech/parity-common/pull/629) + ## [0.15.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index fe87709ae..d43924ca8 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.15.0" +version = "0.15.1" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -20,7 +20,7 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" -rocksdb = { version = "0.17", features = ["snappy"], default-features = false } +rocksdb = { version = "0.18.0", features = ["snappy", "jemalloc"], default-features = false } owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std", "smallvec"] } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 055f9d6f1..a8029c566 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -380,7 +380,7 @@ fn generate_block_based_options(config: &DatabaseConfig) -> io::Result Date: Sun, 20 Mar 2022 12:41:13 +0200 Subject: [PATCH 258/359] Disable jemalloc on OpenBSD and MSVC where it is not working (#633) * Disable jemalloc on OpenBSD and MSVC where it is not working * Update kvdb-rocksdb/CHANGELOG.md Co-authored-by: Andronik --- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 15 +++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 01cf499a0..08ae66937 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 
@@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.15.2] - 2022-03-20 +- Disable `jemalloc` feature for `rocksdb` where it is not working. [#633](https://github.com/paritytech/parity-common/pull/633) + ## [0.15.1] - 2022-02-18 - Updated `rocksdb` to 0.18 and enable `jemalloc` feature. [#629](https://github.com/paritytech/parity-common/pull/629) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index d43924ca8..c8b98afa5 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.15.1" +version = "0.15.2" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -20,10 +20,21 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" -rocksdb = { version = "0.18.0", features = ["snappy", "jemalloc"], default-features = false } owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std", "smallvec"] } +# OpenBSD and MSVC are unteested and shouldn't enable jemalloc: +# https://github.com/tikv/jemallocator/blob/52de4257fab3e770f73d5174c12a095b49572fba/jemalloc-sys/build.rs#L26-L27 +[target.'cfg(any(target_os = "openbsd", target_env = "msvc"))'.dependencies.rocksdb] +default-features = false +features = ["snappy"] +version = "0.18.0" + +[target.'cfg(not(any(target_os = "openbsd", target_env = "msvc")))'.dependencies.rocksdb] +default-features = false +features = ["snappy", "jemalloc"] +version = "0.18.0" + [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.3" From 8d99ffab6da3703aa4cee7f477982cf71e977342 Mon Sep 17 00:00:00 2001 From: Jakub Bogucki Date: Mon, 4 Apr 2022 18:08:49 +0200 Subject: [PATCH 259/359] uint: Make is_zero implementation const (#639) --- uint/src/uint.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 
647a803d7..c136297e8 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -673,9 +673,10 @@ macro_rules! construct_uint { /// Whether this is zero. #[inline] - pub fn is_zero(&self) -> bool { + pub const fn is_zero(&self) -> bool { let &$name(ref arr) = self; - for i in 0..$n_words { if arr[i] != 0 { return false; } } + let mut i = 0; + while i < $n_words { if arr[i] != 0 { return false; } else { i += 1; } } return true; } From 436cb0827f0e3238ccb80d7d453f756d126c0615 Mon Sep 17 00:00:00 2001 From: Matheus Cardoso <45436839+Cardosaum@users.noreply.github.com> Date: Thu, 28 Apr 2022 05:03:45 -0300 Subject: [PATCH 260/359] docs: improves `overflowing_add` documentation (#641) The proposed change makes this function's documentation more consistent with others already present in this file. --- uint/src/uint.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index c136297e8..f63eb8a3f 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1057,7 +1057,7 @@ macro_rules! construct_uint { } } - /// Add with overflow. + /// Addition which overflows and returns a flag if it does. 
#[inline(always)] pub fn overflowing_add(self, other: $name) -> ($name, bool) { $crate::uint_overflowing_binop!( From f6ed7ba3b0e340ca00a44e47f7619e685be3ea09 Mon Sep 17 00:00:00 2001 From: Sergejs Kostjucenko <85877331+sergejparity@users.noreply.github.com> Date: Thu, 2 Jun 2022 19:44:14 +0300 Subject: [PATCH 261/359] Add GHA to dependabot and CODEOWNERS (#646) * add CODEOWNERS * add gha to dependabot --- .github/dependabot.yml | 5 +++++ CODEOWNERS | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 CODEOWNERS diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 39fd4fa43..f3b696156 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,3 +5,8 @@ updates: labels: ["A2-insubstantial", "M5-dependencies"] schedule: interval: "daily" + - package-ecosystem: github-actions + directory: '/' + labels: ["A2-insubstantial", "M5-dependencies"] + schedule: + interval: daily diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..7eb3137e4 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,22 @@ +# Lists some code owners. +# +# A codeowner just oversees some part of the codebase. If an owned file is changed then the +# corresponding codeowner receives a review request. An approval of the codeowner might be +# required for merging a PR (depends on repository settings). +# +# For details about syntax, see: +# https://help.github.com/en/articles/about-code-owners +# But here are some important notes: +# +# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core` +# which can be everywhere. +# - Multiple owners are supported. +# - Either handle (e.g, @github_user or @github_org/team) or email can be used. Keep in mind, +# that handles might work better because they are more recognizable on GitHub, +# eyou can use them for mentioning unlike an email. +# - The latest matching rule, if multiple, takes precedence. 
+ +# main codeowner + +# CI +/.github/ @paritytech/ci From b3717a02735443b7a60c957db388c9d35e985a82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Jun 2022 14:12:51 +0200 Subject: [PATCH 262/359] build(deps): bump actions/checkout from 2 to 3 (#648) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 93a736514..f7f3babad 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ jobs: name: Check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -35,7 +35,7 @@ jobs: - ubuntu-latest - macOS-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -130,7 +130,7 @@ jobs: name: Test Windows runs-on: windows-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -150,7 +150,7 @@ jobs: name: Rustfmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal From 3dd1eede6818deb54a1b06fde3d1223f3cdba046 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 
Jun 2022 14:13:06 +0200 Subject: [PATCH 263/359] build(deps): bump Swatinem/rust-cache from 1.3.0 to 1.4.0 (#647) Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 1.3.0 to 1.4.0. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/v1/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/v1.3.0...v1.4.0) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f7f3babad..eddde0ce7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v1.3.0 + uses: Swatinem/rust-cache@v1.4.0 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v1.3.0 + uses: Swatinem/rust-cache@v1.4.0 - run: rustup target add wasm32-unknown-unknown @@ -138,7 +138,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v1.3.0 + uses: Swatinem/rust-cache@v1.4.0 - uses: actions-rs/cargo@v1 with: From f48cfcdea670864ce3ff1f80907853fce465e821 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 16:21:03 +0200 Subject: [PATCH 264/359] build(deps): update sysinfo requirement from 0.23.0 to 0.24.1 (#649) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. 
- [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index c8b98afa5..91dc1dc89 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -43,6 +43,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.23.0" +sysinfo = "0.24.1" ctrlc = "3.1.4" chrono = "0.4" From 3cc2bcbce8b1a467683ca1358d75fccec6e6ed26 Mon Sep 17 00:00:00 2001 From: Simon Warta <2603011+webmaster128@users.noreply.github.com> Date: Wed, 15 Jun 2022 11:42:51 +0200 Subject: [PATCH 265/359] uint: make fn one const (#650) * Add one test * uint: Make `fn one` const * Test multiplicative inverse property of one * Update uint/CHANGELOG.md Co-authored-by: Andronik --- uint/CHANGELOG.md | 1 + uint/src/uint.rs | 6 ++++-- uint/tests/uint_tests.rs | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index cc7e75e31..267e9c51d 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,6 +5,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Make `one` const. [#650](https://github.com/paritytech/parity-common/pull/650) ## [0.9.3] - 2022-02-04 - Simplified and faster `div_mod`. 
[#478](https://github.com/paritytech/parity-common/pull/478) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index f63eb8a3f..44a4a421e 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -793,8 +793,10 @@ macro_rules! construct_uint { /// One (multiplicative identity) of this type. #[inline] - pub fn one() -> Self { - From::from(1u64) + pub const fn one() -> Self { + let mut words = [0; $n_words]; + words[0] = 1u64; + Self(words) } /// The maximum value which can be inhabited by this type. diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 95e5ab2da..8095a5faa 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -48,6 +48,21 @@ fn const_matching_works() { } } +#[test] +fn one() { + let one = U256::one(); + assert_eq!(one.0, [1, 0, 0, 0]); + + let one = U512::one(); + assert_eq!(one.0, [1, 0, 0, 0, 0, 0, 0, 0]); + + let any = U256::from(123456789); + assert_eq!(any * U256::one(), any); + + let any = U512::from(123456789); + assert_eq!(any * U512::one(), any); +} + #[test] fn u128_conversions() { let mut a = U256::from(u128::max_value()); From 6a7bf780208def015ea688512c7757a78c91526c Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 17 Jun 2022 15:30:44 +0200 Subject: [PATCH 266/359] uint: remove some unsafety (#653) --- uint/src/uint.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 44a4a421e..3f81aeae9 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -218,7 +218,6 @@ macro_rules! uint_overflowing_binop { let $name(ref you) = $other; let mut ret = [0u64; $n_words]; - let ret_ptr = &mut ret as *mut [u64; $n_words] as *mut u64; let mut carry = 0u64; $crate::static_assertions::const_assert!(core::isize::MAX as usize / core::mem::size_of::() > $n_words); @@ -233,19 +232,12 @@ macro_rules! 
uint_overflowing_binop { let (res1, overflow1) = ($fn)(me[i], you[i]); let (res2, overflow2) = ($fn)(res1, carry); - unsafe { - // SAFETY: `i` is within bounds and `i * size_of::() < isize::MAX` - *ret_ptr.offset(i as _) = res2 - } + ret[i] = res2; carry = (overflow1 as u8 + overflow2 as u8) as u64; } else { let (res, overflow) = ($fn)(me[i], you[i]); - unsafe { - // SAFETY: `i` is within bounds and `i * size_of::() < isize::MAX` - *ret_ptr.offset(i as _) = res - } - + ret[i] = res; carry = overflow as u64; } } From b16db492536b4781fb3f794029788e68cc2dae00 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Jul 2022 09:41:20 +0200 Subject: [PATCH 267/359] build(deps): bump Swatinem/rust-cache from 1.4.0 to 2.0.0 (#654) Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 1.4.0 to 2.0.0. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/v1.4.0...v2.0.0) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eddde0ce7..d6a8de07f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v1.4.0 + uses: Swatinem/rust-cache@v2.0.0 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v1.4.0 + uses: Swatinem/rust-cache@v2.0.0 - run: rustup target add wasm32-unknown-unknown @@ -138,7 +138,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v1.4.0 + uses: Swatinem/rust-cache@v2.0.0 - uses: actions-rs/cargo@v1 with: From 55d5fe5b4536a21ac4b97002ab5b0d09cab4c76f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Aug 2022 11:22:18 +0200 Subject: [PATCH 268/359] build(deps): update sysinfo requirement from 0.24.1 to 0.25.1 (#657) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. - [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 91dc1dc89..0bbd1c935 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -43,6 +43,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.24.1" +sysinfo = "0.25.1" ctrlc = "3.1.4" chrono = "0.4" From 615369348dd586b68f0f15551f7b4e441fa9f1ba Mon Sep 17 00:00:00 2001 From: Simon Warta <2603011+webmaster128@users.noreply.github.com> Date: Sun, 14 Aug 2022 14:08:58 +0200 Subject: [PATCH 269/359] uint: make max_value const, test, simplify (#652) * Make max_value const, test and simplify * Deprecate max_value * Test ::MAX * Update uint/src/uint.rs Co-authored-by: Andronik * Apply suggestions from code review Co-authored-by: Andronik Co-authored-by: Andronik --- uint/CHANGELOG.md | 1 + uint/benches/bigint.rs | 8 +++---- uint/src/uint.rs | 14 +++++-------- uint/tests/uint_tests.rs | 45 +++++++++++++++++++++++++++++++++++++++- 4 files changed, 54 insertions(+), 14 deletions(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 267e9c51d..359b6e443 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Make `one` const. [#650](https://github.com/paritytech/parity-common/pull/650) +- Make `max_value` const. [#652](https://github.com/paritytech/parity-common/pull/652) ## [0.9.3] - 2022-02-04 - Simplified and faster `div_mod`. 
[#478](https://github.com/paritytech/parity-common/pull/478) diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index f41f34123..a23b66760 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -142,7 +142,7 @@ fn u256_sub(c: &mut Criterion) { black_box(x.overflowing_sub(y).0) }) }, - vec![(U256::max_value(), 1u64), (U256::from(3), 2)], + vec![(U256::MAX, 1u64), (U256::from(3), 2)], ), ); } @@ -159,7 +159,7 @@ fn u256_mul(c: &mut Criterion) { }) }, vec![ - (U256::max_value(), 1u64), + (U256::MAX, 1u64), (U256::from(3), u64::max_value()), (U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), 173), ], @@ -179,7 +179,7 @@ fn u512_div_mod(c: &mut Criterion) { }) }, vec![ - (U512::max_value(), U512::from(1u64)), + (U512::MAX, U512::from(1u64)), (U512::from(u64::max_value()), U512::from(u32::max_value())), (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), @@ -240,7 +240,7 @@ fn u256_rem(c: &mut Criterion) { "", |b, (x, y)| b.iter(|| black_box(x % y)), vec![ - (U256::max_value(), U256::from(1u64)), + (U256::MAX, U256::from(1u64)), (U256::from(u64::max_value()), U256::from(u64::from(u32::max_value()) + 1)), ( U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]), diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 3f81aeae9..da5361d2e 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -793,12 +793,8 @@ macro_rules! construct_uint { /// The maximum value which can be inhabited by this type. #[inline] - pub fn max_value() -> Self { - let mut result = [0; $n_words]; - for i in 0..$n_words { - result[i] = u64::max_value(); - } - $name(result) + pub const fn max_value() -> Self { + Self::MAX } fn full_shl(self, shift: u32) -> [u64; $n_words + 1] { @@ -1063,10 +1059,10 @@ macro_rules! construct_uint { ) } - /// Addition which saturates at the maximum value (Self::max_value()). 
+ /// Addition which saturates at the maximum value (Self::MAX). pub fn saturating_add(self, other: $name) -> $name { match self.overflowing_add(other) { - (_, true) => $name::max_value(), + (_, true) => $name::MAX, (val, false) => val, } } @@ -1116,7 +1112,7 @@ macro_rules! construct_uint { /// Multiplication which saturates at the maximum value.. pub fn saturating_mul(self, other: $name) -> $name { match self.overflowing_mul(other) { - (_, true) => $name::max_value(), + (_, true) => $name::MAX, (val, false) => val, } } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 8095a5faa..026cc243e 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -48,6 +48,27 @@ fn const_matching_works() { } } +#[test] +fn max() { + let max = U256::MAX; + assert_eq!(max.0, [0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF]); + + let max = U512::MAX; + assert_eq!( + max.0, + [ + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF + ] + ); +} + #[test] fn one() { let one = U256::one(); @@ -63,6 +84,28 @@ fn one() { assert_eq!(any * U512::one(), any); } +#[test] +#[allow(deprecated)] +fn max_value() { + let max = U256::max_value(); + assert_eq!(max.0, [0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF]); + + let max = U512::max_value(); + assert_eq!( + max.0, + [ + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF, + 0xFFFFFFFFFFFFFFFF + ] + ); +} + #[test] fn u128_conversions() { let mut a = U256::from(u128::max_value()); @@ -85,7 +128,7 @@ fn uint256_checked_ops() { assert_eq!(U256::from(10).checked_pow(U256::from(3)), Some(U256::from(1000))); assert_eq!(U256::from(10).checked_pow(U256::from(20)), Some(U256::exp10(20))); 
assert_eq!(U256::from(2).checked_pow(U256::from(0x100)), None); - assert_eq!(U256::max_value().checked_pow(U256::from(2)), None); + assert_eq!(U256::MAX.checked_pow(U256::from(2)), None); assert_eq!(a.checked_add(b), None); assert_eq!(a.checked_add(a), Some(20.into())); From 6ed31100b75ab91cb44ed048ffb782fc17ac84df Mon Sep 17 00:00:00 2001 From: Andronik Date: Sun, 14 Aug 2022 18:24:39 +0200 Subject: [PATCH 270/359] fix some warnings (#663) --- fixed-hash/benches/cmp.rs | 135 ++++--- kvdb-rocksdb/examples/memtest.rs | 13 +- parity-util-mem/derive/lib.rs | 1 - parity-util-mem/src/allocators.rs | 2 +- .../impls/serde/benches/impl_serde.rs | 101 ++--- uint/benches/bigint.rs | 353 +++++++++--------- 6 files changed, 296 insertions(+), 309 deletions(-) diff --git a/fixed-hash/benches/cmp.rs b/fixed-hash/benches/cmp.rs index 62c71db00..fc5551e1c 100644 --- a/fixed-hash/benches/cmp.rs +++ b/fixed-hash/benches/cmp.rs @@ -8,7 +8,7 @@ //! Benchmarks for fixed-hash cmp implementation. -use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark}; +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; use fixed_hash::construct_fixed_hash; @@ -18,80 +18,77 @@ criterion_group!(cmp, eq_equal, eq_nonequal, compare,); criterion_main!(cmp); fn eq_equal(c: &mut Criterion) { - c.bench( - "eq_equal", - ParameterizedBenchmark::new( - "", - |b, x| b.iter(|| black_box(x.eq(black_box(x)))), - vec![ - H256::zero(), - H256::repeat_byte(0xAA), - H256::from([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, - 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, - ]), - H256([u8::max_value(); 32]), - ], - ), - ); + let mut group = c.benchmark_group("eq_self"); + for input in [ + H256::zero(), + H256::repeat_byte(0xAA), + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, 
0x84, + 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256([u8::max_value(); 32]), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(x.eq(black_box(x)))) + }); + } + group.finish(); } fn eq_nonequal(c: &mut Criterion) { - c.bench( - "eq_nonequal", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x.eq(black_box(y)))), - vec![ - ( - H256::zero(), - H256::from([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - ]), - ), - (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), - ( - H256::from([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, - 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, - ]), - H256::from([ - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, - 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, - ]), - ), - ], + let mut group = c.benchmark_group("eq_nonequal"); + for input in [ + ( + H256::zero(), + H256::from([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + ]), + ), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), + ( + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256::from([ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), ), - ); + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| { + b.iter(|| black_box(x.eq(black_box(y)))) + }); + } + group.finish(); } fn compare(c: &mut 
Criterion) { - c.bench( - "compare", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x.cmp(black_box(y)))), - vec![ - ( - H256::zero(), - H256::from([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - ]), - ), - (H256::zero(), H256::zero()), - (H256::repeat_byte(0xAA), H256::repeat_byte(0xAA)), - (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), - ( - H256::from([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, - 0x40, 0x84, 0xC2, 0xDF, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, - ]), - H256::from([ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, - 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, - ]), - ), - ], + let mut group = c.benchmark_group("compare"); + for input in [ + ( + H256::zero(), + H256::from([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + ]), + ), + (H256::zero(), H256::zero()), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xAA)), + (H256::repeat_byte(0xAA), H256::repeat_byte(0xA1)), + ( + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDF, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), + H256::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, + 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, + ]), ), - ); + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| black_box(x.cmp(black_box(y)))) + }); + } + group.finish(); } diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 32097279f..30923fb40 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ 
b/kvdb-rocksdb/examples/memtest.rs @@ -29,8 +29,7 @@ use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; const COLUMN_COUNT: u32 = 100; #[derive(Clone)] -struct KeyValueSeed { - seed: H256, +struct KeyValue { key: H256, val: H256, } @@ -43,9 +42,9 @@ fn next(seed: H256) -> H256 { keccak(&buf[..]) } -impl KeyValueSeed { +impl KeyValue { fn with_seed(seed: H256) -> Self { - KeyValueSeed { seed, key: next(seed), val: next(next(seed)) } + KeyValue { key: next(seed), val: next(next(seed)) } } fn new() -> Self { @@ -53,7 +52,7 @@ impl KeyValueSeed { } } -impl Iterator for KeyValueSeed { +impl Iterator for KeyValue { type Item = (H256, H256); fn next(&mut self) -> Option { @@ -110,7 +109,7 @@ fn main() { let db = Database::open(&config, &dir.path()).unwrap(); let mut step = 0; - let mut keyvalues = KeyValueSeed::new(); + let mut keyvalues = KeyValue::new(); while !exit.load(AtomicOrdering::Relaxed) { let col = step % 100; @@ -138,7 +137,7 @@ fn main() { } db.write(transaction).expect("delete failed"); - keyvalues = KeyValueSeed::with_seed(seed); + keyvalues = KeyValue::with_seed(seed); if step % 10000 == 9999 { let timestamp = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"); diff --git a/parity-util-mem/derive/lib.rs b/parity-util-mem/derive/lib.rs index fc28e0859..78e718635 100644 --- a/parity-util-mem/derive/lib.rs +++ b/parity-util-mem/derive/lib.rs @@ -17,7 +17,6 @@ extern crate syn; #[macro_use] extern crate synstructure; -#[cfg(not(test))] decl_derive!([MallocSizeOf, attributes(ignore_malloc_size_of)] => malloc_size_of_derive); fn malloc_size_of_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index b71ab5e0c..2c63d55d4 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -102,7 +102,7 @@ mod usable_size { target_os = "android", target_os = "freebsd", ))] { - /// Linux/BSD call system allocator (currently malloc). 
+ // Linux/BSD call system allocator (currently malloc). extern "C" { pub fn malloc_usable_size(ptr: *const c_void) -> usize; } diff --git a/primitive-types/impls/serde/benches/impl_serde.rs b/primitive-types/impls/serde/benches/impl_serde.rs index c7a1efea8..fee13c550 100644 --- a/primitive-types/impls/serde/benches/impl_serde.rs +++ b/primitive-types/impls/serde/benches/impl_serde.rs @@ -12,10 +12,9 @@ //! cargo bench //! ``` -use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark}; -use serde_derive::{Deserialize, Serialize}; -// TODO(niklasad1): use `uint::construct_uint` when a new version of `uint` is released +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; use impl_serde::impl_uint_serde; +use serde_derive::{Deserialize, Serialize}; use uint::*; mod input; @@ -33,66 +32,70 @@ criterion_group!(impl_serde, u256_to_hex, hex_to_u256, bytes_to_hex, hex_to_byte criterion_main!(impl_serde); fn u256_to_hex(c: &mut Criterion) { - c.bench( - "u256_to_hex", - ParameterizedBenchmark::new( - "", - |b, x| b.iter(|| black_box(serde_json::to_string(&x))), - vec![ - U256::from(0), - U256::from(100), - U256::from(u32::max_value()), - U256::from(u64::max_value()), - U256::from(u128::max_value()), - U256([1, 2, 3, 4]), - ], - ), - ); + let mut group = c.benchmark_group("u256_to_hex"); + for input in [ + U256::from(0), + U256::from(100), + U256::from(u32::max_value()), + U256::from(u64::max_value()), + U256::from(u128::max_value()), + U256([1, 2, 3, 4]), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(serde_json::to_string(&x))) + }); + } + group.finish(); } fn hex_to_u256(c: &mut Criterion) { - let parameters = vec![ - r#""0x0""#, - r#""0x1""#, - r#""0x10""#, - r#""0x100""#, - r#""0x1000000000000000000000000000000000000000000000000000000000000100""#, - ]; - - c.bench( - "hex_to_u256", - ParameterizedBenchmark::new("", |b, x| b.iter(|| 
black_box(serde_json::from_str::(&x))), parameters), - ); + let mut group = c.benchmark_group("hex_to_u256"); + for input in [ + "\"0x0\"", + "\"0x1\"", + "\"0x10\"", + "\"0x100\"", + "\"0x1000000000000000000000000000000000000000000000000000000000000100\"", + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(serde_json::from_str::(&x))) + }); + } + group.finish(); } fn bytes_to_hex(c: &mut Criterion) { - let parameters = vec![ - serde_json::from_str::(&input::HEX_64_CHARS).unwrap(), - serde_json::from_str::(&input::HEX_256_CHARS).unwrap(), - serde_json::from_str::(&input::HEX_1024_CHARS).unwrap(), - serde_json::from_str::(&input::HEX_4096_CHARS).unwrap(), - serde_json::from_str::(&input::HEX_16384_CHARS).unwrap(), - serde_json::from_str::(&input::HEX_65536_CHARS).unwrap(), + let mut group = c.benchmark_group("bytes_to_hex"); + let params = [ + input::HEX_64_CHARS, + input::HEX_256_CHARS, + input::HEX_1024_CHARS, + input::HEX_4096_CHARS, + input::HEX_16384_CHARS, + input::HEX_65536_CHARS, ]; - - c.bench( - "bytes to hex", - ParameterizedBenchmark::new("", |b, x| b.iter(|| black_box(serde_json::to_string(&x))), parameters), - ); + for param in params { + let input = serde_json::from_str::(¶m).unwrap(); + group.bench_with_input(BenchmarkId::from_parameter(param.len()), &input, |b, x| { + b.iter(|| black_box(serde_json::to_string(&x))) + }); + } + group.finish(); } fn hex_to_bytes(c: &mut Criterion) { - let parameters = vec![ + let mut group = c.benchmark_group("hex_to_bytes"); + for input in [ input::HEX_64_CHARS, input::HEX_256_CHARS, input::HEX_1024_CHARS, input::HEX_4096_CHARS, input::HEX_16384_CHARS, input::HEX_65536_CHARS, - ]; - - c.bench( - "hex to bytes", - ParameterizedBenchmark::new("", |b, x| b.iter(|| black_box(serde_json::from_str::(&x))), parameters), - ); + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.len()), &input, |b, x| { + b.iter(|| black_box(serde_json::from_str::(&x))) + }); 
+ } + group.finish(); } diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index a23b66760..c092dfc5f 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -30,7 +30,7 @@ impl U256 { } } -use criterion::{black_box, Bencher, Criterion, ParameterizedBenchmark}; +use criterion::{black_box, Bencher, BenchmarkId, Criterion}; use num_bigint::BigUint; use rug::{integer::Order, Integer}; use std::str::FromStr; @@ -99,132 +99,113 @@ fn from_gmp(x: Integer) -> U512 { } fn u128_div(c: &mut Criterion) { - c.bench( - "u128_div", - ParameterizedBenchmark::new( - "", - |b, (x, y, z)| { - b.iter(|| { - let x = black_box(u128::from(*x) << 64 + u128::from(*y)); - black_box(x / u128::from(*z)) - }) - }, - vec![(0u64, u64::max_value(), 100u64), (u64::max_value(), u64::max_value(), 99), (42, 42, 100500)], - ), - ); + let mut group = c.benchmark_group("u128_div"); + for input in [(0u64, u64::max_value(), 100u64), (u64::max_value(), u64::max_value(), 99), (42, 42, 100500)] { + group.bench_with_input(BenchmarkId::from_parameter(input.2), &input, |b, (x, y, z)| { + b.iter(|| { + let x = black_box(u128::from(*x) << 64 + u128::from(*y)); + black_box(x / u128::from(*z)) + }) + }); + } + group.finish(); } fn u256_add(c: &mut Criterion) { - c.bench( - "u256_add", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let x = U256::from(*x); - let y = U256::from(*y); - black_box(x.overflowing_add(y).0) - }) - }, - vec![(0u64, 1u64), (u64::max_value(), 1), (42, 100500)], - ), - ); + let mut group = c.benchmark_group("u256_add"); + for input in [(0u64, 1u64), (u64::max_value(), 1), (42, 100500)] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| { + b.iter(|| { + let x = U256::from(*x); + let y = U256::from(*y); + black_box(x.overflowing_add(y).0) + }) + }); + } + group.finish(); } fn u256_sub(c: &mut Criterion) { - c.bench( - "u256_sub", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let y = 
U256::from(*y); - black_box(x.overflowing_sub(y).0) - }) - }, - vec![(U256::MAX, 1u64), (U256::from(3), 2)], - ), - ); + let mut group = c.benchmark_group("hex_to_bytes"); + for input in [(U256::MAX, 1u64), (U256::from(3), 2)] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| { + b.iter(|| { + let y = U256::from(*y); + black_box(x.overflowing_sub(y).0) + }) + }); + } + group.finish(); } fn u256_mul(c: &mut Criterion) { - c.bench( - "u256_mul", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let y = U256::from(*y); - black_box(x.overflowing_mul(y).0) - }) - }, - vec![ - (U256::MAX, 1u64), - (U256::from(3), u64::max_value()), - (U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), 173), - ], - ), - ); + let mut group = c.benchmark_group("u256_mul"); + for input in [ + (U256::MAX, 1u64), + (U256::from(3), u64::max_value()), + (U256::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), 173), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| { + let y = U256::from(*y); + black_box(x.overflowing_mul(y).0) + }) + }); + } + group.finish(); } fn u512_div_mod(c: &mut Criterion) { - c.bench( - "u512_div_mod", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let (q, r) = x.div_mod(*y); - black_box((q, r)) - }) - }, - vec![ - (U512::MAX, U512::from(1u64)), - (U512::from(u64::max_value()), U512::from(u32::max_value())), - (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), - (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), - ( - U512::from_dec_str("3759751734479964094783137206182536765532905409829204647089173492").unwrap(), - U512::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), - ), - ( - U512::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - ) - .unwrap(), - 
U512::from_str( - "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0", - ) - .unwrap(), - ), - ( - U512::from_dec_str( - "204586912993508866875824356051724947013540127877691549342705710506008362274387533983037847993622361501550043477868832682875761627559574690771211649025" - ).unwrap(), - U512::from_dec_str( - "452312848583266388373324160190187140051835877600158453279131187530910662640" - ).unwrap(), - ), - ], + let mut group = c.benchmark_group("u512_div_mod"); + for input in [ + (U512::MAX, U512::from(1u64)), + (U512::from(u64::max_value()), U512::from(u32::max_value())), + (U512::from(u64::max_value()), U512::from(u64::max_value() - 1)), + ( + U512::from_dec_str("3759751734479964094783137206182536765532905409829204647089173492").unwrap(), + U512::from_dec_str("21674844646682989462120101885968193938394323990565507610662749").unwrap(), + ), + ( + U512::from_str( + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ) + .unwrap(), + U512::from_str( + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0", + ) + .unwrap(), ), - ); + ( + U512::from_dec_str( + "204586912993508866875824356051724947013540127877691549342705710506008362274387533983037847993622361501550043477868832682875761627559574690771211649025" + ).unwrap(), + U512::from_dec_str( + "452312848583266388373324160190187140051835877600158453279131187530910662640" + ).unwrap(), + ), + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| { + let (q, r) = x.div_mod(*y); + black_box((q, r)) + }) + }); + } + group.finish(); } fn u256_mul_full(c: &mut Criterion) { - c.bench( - "u256_mul_full", - ParameterizedBenchmark::new( - "", - |b, (x, y)| { - b.iter(|| { - let y = *y; - let U512(ref u512words) = x.full_mul(U256([y, y, y, y])); - black_box(U256([u512words[0], u512words[2], u512words[2], u512words[3]])) - }) - }, - vec![(U256::from(42), 1u64), (U256::from(3), u64::max_value())], - ), - ); + let mut 
group = c.benchmark_group("hex_to_bytes"); + for input in [(U256::from(42), 1u64), (U256::from(3), u64::max_value())] { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| { + let y = *y; + let U512(ref u512words) = x.full_mul(U256([y, y, y, y])); + black_box(U256([u512words[0], u512words[2], u512words[2], u512words[3]])) + }) + }); + } + group.finish(); } fn u256_div(c: &mut Criterion) { @@ -234,41 +215,37 @@ fn u256_div(c: &mut Criterion) { } fn u256_rem(c: &mut Criterion) { - c.bench( - "u256_rem", - ParameterizedBenchmark::new( - "", - |b, (x, y)| b.iter(|| black_box(x % y)), - vec![ - (U256::MAX, U256::from(1u64)), - (U256::from(u64::max_value()), U256::from(u64::from(u32::max_value()) + 1)), - ( - U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]), - U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), - ), - ( - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), - U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0").unwrap(), - ), - ], + let mut group = c.benchmark_group("u256_rem"); + for input in [ + (U256::MAX, U256::from(1u64)), + (U256::from(u64::max_value()), U256::from(u64::from(u32::max_value()) + 1)), + ( + U256([12767554894655550452, 16333049135534778834, 140317443000293558, 598963]), + U256([2096410819092764509, 8483673822214032535, 36306297304129857, 3453]), + ), + ( + U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0").unwrap(), ), - ); + ] { + group.bench_with_input(BenchmarkId::from_parameter(input.0), &input, |b, (x, y)| b.iter(|| black_box(x % y))); + } + group.finish(); } fn u256_integer_sqrt(c: &mut Criterion) { - c.bench( - "u256_integer_sqrt", - ParameterizedBenchmark::new( - "", - |b, x| b.iter(|| black_box(x.integer_sqrt().0)), - vec![ - 
U256::from(u64::MAX), - U256::from(u128::MAX) + 1, - U256::from(u128::MAX - 1) * U256::from(u128::MAX - 1) - 1, - U256::MAX, - ], - ), - ); + let mut group = c.benchmark_group("u256_integer_sqrt"); + for input in [ + U256::from(u64::MAX), + U256::from(u128::MAX) + 1, + U256::from(u128::MAX - 1) * U256::from(u128::MAX - 1) - 1, + U256::MAX, + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(x.integer_sqrt().0)) + }); + } + group.finish(); } fn u512_pairs() -> Vec<(U512, U512)> { @@ -287,38 +264,47 @@ fn u512_pairs() -> Vec<(U512, U512)> { } fn u512_add(c: &mut Criterion) { - c.bench("u512_add", ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x + y)), u512_pairs())); + let mut group = c.benchmark_group("u512_add"); + for input in u512_pairs() { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| b.iter(|| black_box(x + y))); + } + group.finish(); } fn u512_sub(c: &mut Criterion) { - c.bench( - "u512_sub", - ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x.overflowing_sub(*y).0)), u512_pairs()), - ); + let mut group = c.benchmark_group("u512_sub"); + for input in u512_pairs() { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| black_box(x.overflowing_sub(*y).0)) + }); + } + group.finish(); } fn u512_mul(c: &mut Criterion) { - c.bench( - "u512_mul", - ParameterizedBenchmark::new("", |b, (x, y)| b.iter(|| black_box(x.overflowing_mul(*y).0)), u512_pairs()), - ); + let mut group = c.benchmark_group("u512_mul"); + for input in u512_pairs() { + group.bench_with_input(BenchmarkId::from_parameter(input.1), &input, |b, (x, y)| { + b.iter(|| black_box(x.overflowing_mul(*y).0)) + }); + } + group.finish(); } fn u512_integer_sqrt(c: &mut Criterion) { - c.bench( - "u512_integer_sqrt", - ParameterizedBenchmark::new( - "", - |b, x| b.iter(|| black_box(x.integer_sqrt().0)), - vec![ - U512::from(u32::MAX) + 1, - 
U512::from(u64::MAX), - (U512::from(u128::MAX) + 1) * (U512::from(u128::MAX) + 1), - U256::MAX.full_mul(U256::MAX) - 1, - U512::MAX, - ], - ), - ); + let mut group = c.benchmark_group("u512_integer_sqrt"); + for input in [ + U512::from(u32::MAX) + 1, + U512::from(u64::MAX), + (U512::from(u128::MAX) + 1) * (U512::from(u128::MAX) + 1), + U256::MAX.full_mul(U256::MAX) - 1, + U512::MAX, + ] { + group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { + b.iter(|| black_box(x.integer_sqrt().0)) + }); + } + group.finish(); } fn u512_div(c: &mut Criterion) { @@ -370,11 +356,12 @@ fn u512_rem(c: &mut Criterion) { } fn conversions(c: &mut Criterion) { - c.bench( - "conversions biguint vs gmp", - ParameterizedBenchmark::new("BigUint", |b, i| bench_convert_to_biguit(b, *i), vec![0, 42, u64::max_value()]) - .with_function("gmp", |b, i| bench_convert_to_gmp(b, *i)), - ); + let mut group = c.benchmark_group("conversions biguint vs gmp"); + for input in [0, 42, u64::MAX] { + group.bench_with_input(BenchmarkId::new("BigUint", input), &input, |b, i| bench_convert_to_biguit(b, *i)); + group.bench_with_input(BenchmarkId::new("GMP", input), &input, |b, i| bench_convert_to_gmp(b, *i)); + } + group.finish(); } fn bench_convert_to_biguit(b: &mut Bencher, i: u64) { @@ -396,12 +383,13 @@ fn bench_convert_to_gmp(b: &mut Bencher, i: u64) { } fn u512_mul_u32_vs_u64(c: &mut Criterion) { - let ms = vec![1u32, 42, 10_000_001, u32::max_value()]; - c.bench( - "multiply u512 by u32 vs u64", - ParameterizedBenchmark::new("u32", |b, i| bench_u512_mul_u32(b, *i), ms) - .with_function("u64", |b, i| bench_u512_mul_u64(b, u64::from(*i))), - ); + let ms = vec![1u32, 42, 10_000_001, u32::MAX]; + let mut group = c.benchmark_group("multiply u512 by u32 vs u64"); + for input in ms { + group.bench_with_input(BenchmarkId::new("u32", input), &input, |b, i| bench_u512_mul_u32(b, *i)); + group.bench_with_input(BenchmarkId::new("u64", input), &input, |b, i| bench_u512_mul_u64(b, u64::from(*i))); + 
} + group.finish(); } fn bench_u512_mul_u32(b: &mut Bencher, i: u32) { @@ -421,12 +409,13 @@ fn mulmod_u512_vs_biguint_vs_gmp(c: &mut Criterion) { U256::from(u64::max_value()), U256::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1").unwrap(), ]; - c.bench( - "mulmod u512 vs biguint vs gmp", - ParameterizedBenchmark::new("u512", |b, i| bench_u512_mulmod(b, *i), mods) - .with_function("BigUint", |b, i| bench_biguint_mulmod(b, *i)) - .with_function("gmp", |b, i| bench_gmp_mulmod(b, *i)), - ); + let mut group = c.benchmark_group("mulmod u512 vs biguint vs gmp"); + for input in mods { + group.bench_with_input(BenchmarkId::new("u512", input), &input, |b, i| bench_u512_mulmod(b, *i)); + group.bench_with_input(BenchmarkId::new("BigUint", input), &input, |b, i| bench_biguint_mulmod(b, *i)); + group.bench_with_input(BenchmarkId::new("GMP", input), &input, |b, i| bench_gmp_mulmod(b, *i)); + } + group.finish(); } fn bench_biguint_mulmod(b: &mut Bencher, z: U256) { From 8941d3f12a8d7f75772062012a9daa60a9baa5e4 Mon Sep 17 00:00:00 2001 From: Andronik Date: Sun, 14 Aug 2022 22:33:06 +0200 Subject: [PATCH 271/359] rocksdb: remove and simplify a bunch of stuff (#662) * rocksdb: remove and simplify a bunch of stuff * update changelogs * Update kvdb/CHANGELOG.md Co-authored-by: cheme Co-authored-by: cheme --- kvdb-memorydb/CHANGELOG.md | 2 + kvdb-memorydb/src/lib.rs | 4 - kvdb-rocksdb/CHANGELOG.md | 4 + kvdb-rocksdb/Cargo.toml | 2 - kvdb-rocksdb/src/iter.rs | 80 ----------- kvdb-rocksdb/src/lib.rs | 281 +++++++++++++------------------------ kvdb/CHANGELOG.md | 2 + kvdb/src/lib.rs | 3 - 8 files changed, 102 insertions(+), 276 deletions(-) diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index e6dc4f85c..c9b3e5c86 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `kvdb` to 0.12. [662](https://github.com/paritytech/parity-common/pull/662) ## [0.11.0] - 2022-02-04 ### Breaking diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 265aa3312..509835f83 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -117,10 +117,6 @@ impl KeyValueDB for InMemory { None => Box::new(None.into_iter()), } } - - fn restore(&self, _new_db: &str) -> io::Result<()> { - Err(io::Error::new(io::ErrorKind::Other, "Attempted to restore in-memory database")) - } } #[cfg(test)] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 08ae66937..1143ee90a 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,6 +5,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +- Removed `owning_ref` from dependencies :tada:. [662](https://github.com/paritytech/parity-common/pull/662) +### Breaking +- Update `kvdb` to 0.12. [662](https://github.com/paritytech/parity-common/pull/662) + - `add_column` and `remove_last_column` now require `&mut self` ## [0.15.2] - 2022-03-20 - Disable `jemalloc` feature for `rocksdb` where it is not working. 
[#633](https://github.com/paritytech/parity-common/pull/633) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 0bbd1c935..4741483e7 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -14,13 +14,11 @@ harness = false [dependencies] smallvec = "1.0.0" -fs-swap = "0.2.6" kvdb = { path = "../kvdb", version = "0.11" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" -owning_ref = "0.4.0" parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std", "smallvec"] } # OpenBSD and MSVC are unteested and shouldn't enable jemalloc: diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index 818099ca4..ca5ef7e59 100644 --- a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -16,59 +16,11 @@ //! See https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes for details. use crate::DBAndColumns; -use owning_ref::{OwningHandle, StableAddress}; -use parking_lot::RwLockReadGuard; use rocksdb::{DBIterator, Direction, IteratorMode, ReadOptions}; -use std::ops::{Deref, DerefMut}; /// A tuple holding key and value data, used as the iterator item type. pub type KeyValuePair = (Box<[u8]>, Box<[u8]>); -/// Iterator with built-in synchronization. -pub struct ReadGuardedIterator<'a, I, T> { - inner: OwningHandle>, DerefWrapper>>, -} - -// We can't implement `StableAddress` for a `RwLockReadGuard` -// directly due to orphan rules. 
-#[repr(transparent)] -struct UnsafeStableAddress<'a, T>(RwLockReadGuard<'a, T>); - -impl<'a, T> Deref for UnsafeStableAddress<'a, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - self.0.deref() - } -} - -// RwLockReadGuard dereferences to a stable address; qed -unsafe impl<'a, T> StableAddress for UnsafeStableAddress<'a, T> {} - -struct DerefWrapper(T); - -impl Deref for DerefWrapper { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for DerefWrapper { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl<'a, I: Iterator, T> Iterator for ReadGuardedIterator<'a, I, T> { - type Item = I::Item; - - fn next(&mut self) -> Option { - self.inner.deref_mut().as_mut().and_then(|iter| iter.next()) - } -} - /// Instantiate iterators yielding `KeyValuePair`s. pub trait IterationHandler { type Iterator: Iterator; @@ -84,38 +36,6 @@ pub trait IterationHandler { fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator; } -impl<'a, T> ReadGuardedIterator<'a, <&'a T as IterationHandler>::Iterator, T> -where - &'a T: IterationHandler, -{ - /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, - /// where `DBIterator` iterates over all keys. - pub fn new(read_lock: RwLockReadGuard<'a, Option>, col: u32, read_opts: ReadOptions) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter(col, read_opts)) } - } - - /// Creates a new `ReadGuardedIterator` that maps `RwLock` to `RwLock`, - /// where `DBIterator` iterates over keys which start with the given prefix. 
- pub fn new_with_prefix( - read_lock: RwLockReadGuard<'a, Option>, - col: u32, - prefix: &[u8], - read_opts: ReadOptions, - ) -> Self { - Self { inner: Self::new_inner(read_lock, |db| db.iter_with_prefix(col, prefix, read_opts)) } - } - - fn new_inner( - rlock: RwLockReadGuard<'a, Option>, - f: impl FnOnce(&'a T) -> <&'a T as IterationHandler>::Iterator, - ) -> OwningHandle>, DerefWrapper::Iterator>>> { - OwningHandle::new_with_fn(UnsafeStableAddress(rlock), move |rlock| { - let rlock = unsafe { rlock.as_ref().expect("initialized as non-null; qed") }; - DerefWrapper(rlock.as_ref().map(f)) - }) - } -} - impl<'a> IterationHandler for &'a DBAndColumns { type Iterator = DBIterator<'a>; diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index a8029c566..de18b7805 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -12,22 +12,19 @@ mod stats; use std::{ cmp, collections::HashMap, - convert::identity, - error, fs, io, mem, + error, fs, io, path::{Path, PathBuf}, result, }; use parity_util_mem::MallocSizeOf; -use parking_lot::RwLock; use rocksdb::{ BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Error, Options, ReadOptions, WriteBatch, WriteOptions, DB, }; use crate::iter::KeyValuePair; -use fs_swap::{swap, swap_nonatomic}; use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; -use log::{debug, warn}; +use log::warn; #[cfg(target_os = "linux")] use regex::Regex; @@ -293,7 +290,7 @@ impl DBAndColumns { /// Key-Value database. #[derive(MallocSizeOf)] pub struct Database { - db: RwLock>, + inner: DBAndColumns, #[ignore_malloc_size_of = "insignificant"] config: DatabaseConfig, #[ignore_malloc_size_of = "insignificant"] @@ -419,7 +416,7 @@ impl Database { }; Ok(Database { - db: RwLock::new(Some(DBAndColumns { db, column_names })), + inner: DBAndColumns { db, column_names }, config: config.clone(), path: path.as_ref().to_owned(), opts, @@ -506,80 +503,72 @@ impl Database { /// Commit transaction to database. 
pub fn write(&self, tr: DBTransaction) -> io::Result<()> { - match *self.db.read() { - Some(ref cfs) => { - let mut batch = WriteBatch::default(); - let ops = tr.ops; - - self.stats.tally_writes(ops.len() as u64); - self.stats.tally_transactions(1); - - let mut stats_total_bytes = 0; - - for op in ops { - let cf = cfs.cf(op.col() as usize); - - match op { - DBOp::Insert { col: _, key, value } => { - stats_total_bytes += key.len() + value.len(); - batch.put_cf(cf, &key, &value); - }, - DBOp::Delete { col: _, key } => { - // We count deletes as writes. - stats_total_bytes += key.len(); - batch.delete_cf(cf, &key); - }, - DBOp::DeletePrefix { col, prefix } => { - let end_prefix = kvdb::end_prefix(&prefix[..]); - let no_end = end_prefix.is_none(); - let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); - batch.delete_range_cf(cf, &prefix[..], &end_range[..]); - if no_end { - use crate::iter::IterationHandler as _; - - let prefix = if prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] }; - // We call `iter_with_prefix` directly on `cfs` to avoid taking a lock twice - // See https://github.com/paritytech/parity-common/pull/396. 
- let read_opts = generate_read_options(); - for (key, _) in cfs.iter_with_prefix(col, prefix, read_opts) { - batch.delete_cf(cf, &key[..]); - } - } - }, - }; - } - self.stats.tally_bytes_written(stats_total_bytes as u64); - - check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) - }, - None => Err(other_io_err("Database is closed")), + let cfs = &self.inner; + let mut batch = WriteBatch::default(); + let ops = tr.ops; + + self.stats.tally_writes(ops.len() as u64); + self.stats.tally_transactions(1); + + let mut stats_total_bytes = 0; + + for op in ops { + let cf = cfs.cf(op.col() as usize); + + match op { + DBOp::Insert { col: _, key, value } => { + stats_total_bytes += key.len() + value.len(); + batch.put_cf(cf, &key, &value); + }, + DBOp::Delete { col: _, key } => { + // We count deletes as writes. + stats_total_bytes += key.len(); + batch.delete_cf(cf, &key); + }, + DBOp::DeletePrefix { col, prefix } => { + let end_prefix = kvdb::end_prefix(&prefix[..]); + let no_end = end_prefix.is_none(); + let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); + batch.delete_range_cf(cf, &prefix[..], &end_range[..]); + if no_end { + use crate::iter::IterationHandler as _; + + let prefix = if prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] }; + // We call `iter_with_prefix` directly on `cfs` to avoid taking a lock twice + // See https://github.com/paritytech/parity-common/pull/396. + let read_opts = generate_read_options(); + for (key, _) in cfs.iter_with_prefix(col, prefix, read_opts) { + batch.delete_cf(cf, &key[..]); + } + } + }, + }; } + self.stats.tally_bytes_written(stats_total_bytes as u64); + + check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) } /// Get value by key. 
pub fn get(&self, col: u32, key: &[u8]) -> io::Result> { - match *self.db.read() { - Some(ref cfs) => { - if cfs.column_names.get(col as usize).is_none() { - return Err(other_io_err("column index is out of bounds")) - } - self.stats.tally_reads(1); - let value = cfs - .db - .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) - .map(|r| r.map(|v| v.to_vec())) - .map_err(other_io_err); - - match value { - Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), - Ok(None) => self.stats.tally_bytes_read(key.len() as u64), - _ => {}, - }; - - value - }, - None => Ok(None), + let cfs = &self.inner; + if cfs.column_names.get(col as usize).is_none() { + return Err(other_io_err("column index is out of bounds")) } + self.stats.tally_reads(1); + let value = cfs + .db + .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) + .map(|r| r.map(|v| v.to_vec())) + .map_err(other_io_err); + + match value { + Ok(Some(ref v)) => self.stats.tally_bytes_read((key.len() + v.len()) as u64), + Ok(None) => self.stats.tally_bytes_read(key.len() as u64), + _ => {}, + }; + + value } /// Get value by partial key. Prefix size should match configured prefix size. @@ -591,126 +580,56 @@ impl Database { /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. pub fn iter<'a>(&'a self, col: u32) -> impl Iterator + 'a { - let read_lock = self.db.read(); - let optional = if read_lock.is_some() { - let read_opts = generate_read_options(); - let guarded = iter::ReadGuardedIterator::new(read_lock, col, read_opts); - Some(guarded) - } else { - None - }; - optional.into_iter().flat_map(identity) + let read_opts = generate_read_options(); + iter::IterationHandler::iter(&&self.inner, col, read_opts) } /// Iterator over data in the `col` database column index matching the given prefix. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. 
fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { - let read_lock = self.db.read(); - let optional = if read_lock.is_some() { - let mut read_opts = generate_read_options(); - // rocksdb doesn't work with an empty upper bound - if let Some(end_prefix) = kvdb::end_prefix(prefix) { - read_opts.set_iterate_upper_bound(end_prefix); - } - let guarded = iter::ReadGuardedIterator::new_with_prefix(read_lock, col, prefix, read_opts); - Some(guarded) - } else { - None - }; - optional.into_iter().flat_map(identity) - } - - /// Close the database - fn close(&self) { - *self.db.write() = None; - } - - /// Restore the database from a copy at given path. - pub fn restore>(&self, new_db: P) -> io::Result<()> { - self.close(); - - // swap is guaranteed to be atomic - match swap(new_db.as_ref(), &self.path) { - Ok(_) => { - // ignore errors - let _ = fs::remove_dir_all(new_db.as_ref()); - }, - Err(err) => { - debug!("DB atomic swap failed: {}", err); - match swap_nonatomic(new_db.as_ref(), &self.path) { - Ok(_) => { - // ignore errors - let _ = fs::remove_dir_all(new_db); - }, - Err(err) => { - warn!("Failed to swap DB directories: {:?}", err); - return Err(io::Error::new( - io::ErrorKind::Other, - "DB restoration failed: could not swap DB directories", - )) - }, - } - }, + let mut read_opts = generate_read_options(); + // rocksdb doesn't work with an empty upper bound + if let Some(end_prefix) = kvdb::end_prefix(prefix) { + read_opts.set_iterate_upper_bound(end_prefix); } - - // reopen the database and steal handles into self - let db = Self::open(&self.config, &self.path)?; - *self.db.write() = mem::replace(&mut *db.db.write(), None); - Ok(()) + iter::IterationHandler::iter_with_prefix(&&self.inner, col, prefix, read_opts) } /// The number of column families in the db. 
pub fn num_columns(&self) -> u32 { - self.db - .read() - .as_ref() - .and_then(|db| if db.column_names.is_empty() { None } else { Some(db.column_names.len()) }) - .map(|n| n as u32) - .unwrap_or(0) + self.inner.column_names.len() as u32 } /// The number of keys in a column (estimated). pub fn num_keys(&self, col: u32) -> io::Result { const ESTIMATE_NUM_KEYS: &str = "rocksdb.estimate-num-keys"; - match *self.db.read() { - Some(ref cfs) => { - let cf = cfs.cf(col as usize); - match cfs.db.property_int_value_cf(cf, ESTIMATE_NUM_KEYS) { - Ok(estimate) => Ok(estimate.unwrap_or_default()), - Err(err_string) => Err(other_io_err(err_string)), - } - }, - None => Ok(0), + let cfs = &self.inner; + let cf = cfs.cf(col as usize); + match cfs.db.property_int_value_cf(cf, ESTIMATE_NUM_KEYS) { + Ok(estimate) => Ok(estimate.unwrap_or_default()), + Err(err_string) => Err(other_io_err(err_string)), } } /// Remove the last column family in the database. The deletion is definitive. - pub fn remove_last_column(&self) -> io::Result<()> { - match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut column_names }) => { - if let Some(name) = column_names.pop() { - db.drop_cf(&name).map_err(other_io_err)?; - } - Ok(()) - }, - None => Ok(()), + pub fn remove_last_column(&mut self) -> io::Result<()> { + let DBAndColumns { ref mut db, ref mut column_names } = self.inner; + if let Some(name) = column_names.pop() { + db.drop_cf(&name).map_err(other_io_err)?; } + Ok(()) } /// Add a new column family to the DB. 
- pub fn add_column(&self) -> io::Result<()> { - match *self.db.write() { - Some(DBAndColumns { ref mut db, ref mut column_names }) => { - let col = column_names.len() as u32; - let name = format!("col{}", col); - let col_config = self.config.column_config(&self.block_opts, col as u32); - let _ = db.create_cf(&name, &col_config).map_err(other_io_err)?; - column_names.push(name); - Ok(()) - }, - None => Ok(()), - } + pub fn add_column(&mut self) -> io::Result<()> { + let DBAndColumns { ref mut db, ref mut column_names } = self.inner; + let col = column_names.len() as u32; + let name = format!("col{}", col); + let col_config = self.config.column_config(&self.block_opts, col as u32); + let _ = db.create_cf(&name, &col_config).map_err(other_io_err)?; + column_names.push(name); + Ok(()) } /// Get RocksDB statistics. @@ -743,10 +662,7 @@ impl Database { /// /// Calling this as primary will return an error. pub fn try_catch_up_with_primary(&self) -> io::Result<()> { - match self.db.read().as_ref() { - Some(DBAndColumns { db, .. }) => db.try_catch_up_with_primary().map_err(other_io_err), - None => Ok(()), - } + self.inner.db.try_catch_up_with_primary().map_err(other_io_err) } } @@ -775,10 +691,6 @@ impl KeyValueDB for Database { Box::new(unboxed.into_iter()) } - fn restore(&self, new_db: &str) -> io::Result<()> { - Database::restore(self, new_db) - } - fn io_stats(&self, kind: kvdb::IoStatsKind) -> kvdb::IoStats { let rocksdb_stats = self.get_statistics(); let cache_hit_count = rocksdb_stats.get("block.cache.hit").map(|s| s.count).unwrap_or(0u64); @@ -930,12 +842,7 @@ mod tests { } db.write(batch).unwrap(); - { - let db = db.db.read(); - db.as_ref().map(|db| { - assert!(db.static_property_or_warn(0, "rocksdb.cur-size-all-mem-tables") > 512); - }); - } + assert!(db.inner.static_property_or_warn(0, "rocksdb.cur-size-all-mem-tables") > 512); } #[test] @@ -976,7 +883,7 @@ mod tests { // open 1, add 4. 
{ - let db = Database::open(&config_1, tempdir.path().to_str().unwrap()).unwrap(); + let mut db = Database::open(&config_1, tempdir.path().to_str().unwrap()).unwrap(); assert_eq!(db.num_columns(), 1); for i in 2..=5 { @@ -1001,7 +908,7 @@ mod tests { // open 5, remove 4. { - let db = Database::open(&config_5, tempdir.path()).expect("open with 5 columns"); + let mut db = Database::open(&config_5, tempdir.path()).expect("open with 5 columns"); assert_eq!(db.num_columns(), 5); for i in (1..5).rev() { diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 5c5093b14..3dd994f4d 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Removed `fn restore` from `KeyValueDB` trait. [662](https://github.com/paritytech/parity-common/pull/662) ## [0.11.0] - 2022-02-04 ### Breaking diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 7693439b9..437113082 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -128,9 +128,6 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { prefix: &'a [u8], ) -> Box, Box<[u8]>)> + 'a>; - /// Attempt to replace this database with a new one located at the given path. - fn restore(&self, new_db: &str) -> io::Result<()>; - /// Query statistics. 
/// /// Not all kvdb implementations are able or expected to implement this, so by From 26d712dbc401f6065250baca41663c0c8298e60a Mon Sep 17 00:00:00 2001 From: Andronik Date: Mon, 15 Aug 2022 17:40:01 +0200 Subject: [PATCH 272/359] Streamline kvdb API and upgrade rocksdb to 0.19 (#661) * upgrade kvdb to 0.19 and tikv-jemallocator to 0.5 * EndOnErrorIterator * fmt * add a warning * exhaustive match * kvdb: more consistent API * kvdb: expose KeyValuePair and fmt * kvdb: changelog amends * kvdb: change DBKeyValue * kvdb-rocksdb: return an error on column OOB * no warn in size_of as before * small cleanup --- kvdb-memorydb/src/lib.rs | 31 +++++------ kvdb-rocksdb/Cargo.toml | 4 +- kvdb-rocksdb/benches/bench_read_perf.rs | 2 +- kvdb-rocksdb/src/iter.rs | 72 +++++++++++++++++++----- kvdb-rocksdb/src/lib.rs | 74 +++++++++++++++---------- kvdb-shared-tests/src/lib.rs | 24 ++++---- kvdb/CHANGELOG.md | 4 ++ kvdb/src/lib.rs | 14 +++-- parity-util-mem/Cargo.toml | 4 +- 9 files changed, 146 insertions(+), 83 deletions(-) diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index 509835f83..a6de728e2 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; +use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; use parity_util_mem::MallocSizeOf; use parking_lot::RwLock; use std::{ @@ -33,23 +33,24 @@ pub fn create(num_cols: u32) -> InMemory { InMemory { columns: RwLock::new(cols) } } +fn invalid_column(col: u32) -> io::Error { + io::Error::new(io::ErrorKind::Other, format!("No such column family: {:?}", col)) +} + impl KeyValueDB for InMemory { fn get(&self, col: u32, key: &[u8]) -> io::Result> { let columns = self.columns.read(); match columns.get(&col) { - None => Err(io::Error::new(io::ErrorKind::Other, format!("No such column family: {:?}", col))), + None => Err(invalid_column(col)), Some(map) => Ok(map.get(key).cloned()), } } - fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result> { let columns = self.columns.read(); match columns.get(&col) { - None => None, - Some(map) => map - .iter() - .find(|&(ref k, _)| k.starts_with(prefix)) - .map(|(_, v)| v.to_vec().into_boxed_slice()), + None => Err(invalid_column(col)), + Some(map) => Ok(map.iter().find(|&(ref k, _)| k.starts_with(prefix)).map(|(_, v)| v.to_vec())), } } @@ -90,15 +91,13 @@ impl KeyValueDB for InMemory { Ok(()) } - fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( // TODO: worth optimizing at all? 
- map.clone() - .into_iter() - .map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())), + map.clone().into_iter().map(|(k, v)| Ok((k.into(), v))), ), - None => Box::new(None.into_iter()), + None => Box::new(std::iter::once(Err(invalid_column(col)))), } } @@ -106,15 +105,15 @@ impl KeyValueDB for InMemory { &'a self, col: u32, prefix: &'a [u8], - ) -> Box, Box<[u8]>)> + 'a> { + ) -> Box> + 'a> { match self.columns.read().get(&col) { Some(map) => Box::new( map.clone() .into_iter() .filter(move |&(ref k, _)| k.starts_with(prefix)) - .map(|(k, v)| (k.into_boxed_slice(), v.into_boxed_slice())), + .map(|(k, v)| Ok((k.into(), v))), ), - None => Box::new(None.into_iter()), + None => Box::new(std::iter::once(Err(invalid_column(col)))), } } } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 4741483e7..0b46fc215 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -26,12 +26,12 @@ parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-featu [target.'cfg(any(target_os = "openbsd", target_env = "msvc"))'.dependencies.rocksdb] default-features = false features = ["snappy"] -version = "0.18.0" +version = "0.19.0" [target.'cfg(not(any(target_os = "openbsd", target_env = "msvc")))'.dependencies.rocksdb] default-features = false features = ["snappy", "jemalloc"] -version = "0.18.0" +version = "0.19.0" [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-rocksdb/benches/bench_read_perf.rs b/kvdb-rocksdb/benches/bench_read_perf.rs index 78e8b773f..3b87e8bb1 100644 --- a/kvdb-rocksdb/benches/bench_read_perf.rs +++ b/kvdb-rocksdb/benches/bench_read_perf.rs @@ -187,7 +187,7 @@ fn iter(c: &mut Criterion) { let (alloc_stats, _) = count_alloc(|| { let start = Instant::now(); for _ in 0..iterations { - black_box(db.iter(0).next().unwrap()); + black_box(db.iter(0).next().unwrap().unwrap()); } elapsed = start.elapsed(); }); diff --git a/kvdb-rocksdb/src/iter.rs b/kvdb-rocksdb/src/iter.rs index ca5ef7e59..08ed32022 100644 --- 
a/kvdb-rocksdb/src/iter.rs +++ b/kvdb-rocksdb/src/iter.rs @@ -15,36 +15,80 @@ //! To work around this we set an upper bound to the prefix successor. //! See https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes for details. -use crate::DBAndColumns; +use crate::{other_io_err, DBAndColumns, DBKeyValue}; use rocksdb::{DBIterator, Direction, IteratorMode, ReadOptions}; +use std::io; -/// A tuple holding key and value data, used as the iterator item type. -pub type KeyValuePair = (Box<[u8]>, Box<[u8]>); - -/// Instantiate iterators yielding `KeyValuePair`s. +/// Instantiate iterators yielding `io::Result`s. pub trait IterationHandler { - type Iterator: Iterator; + type Iterator: Iterator>; /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes /// `ReadOptions` to allow configuration of the new iterator (see /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). - fn iter(&self, col: u32, read_opts: ReadOptions) -> Self::Iterator; + fn iter(self, col: u32, read_opts: ReadOptions) -> Self::Iterator; /// Create an `Iterator` over a `ColumnFamily` corresponding to the passed index. Takes /// `ReadOptions` to allow configuration of the new iterator (see /// https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h#L1169). /// The `Iterator` iterates over keys which start with the provided `prefix`. 
- fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator; + fn iter_with_prefix(self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator; } impl<'a> IterationHandler for &'a DBAndColumns { - type Iterator = DBIterator<'a>; + type Iterator = EitherIter>, std::iter::Once>>; + + fn iter(self, col: u32, read_opts: ReadOptions) -> Self::Iterator { + match self.cf(col as usize) { + Ok(cf) => EitherIter::A(KvdbAdapter(self.db.iterator_cf_opt(cf, read_opts, IteratorMode::Start))), + Err(e) => EitherIter::B(std::iter::once(Err(e))), + } + } + + fn iter_with_prefix(self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator { + match self.cf(col as usize) { + Ok(cf) => EitherIter::A(KvdbAdapter(self.db.iterator_cf_opt( + cf, + read_opts, + IteratorMode::From(prefix, Direction::Forward), + ))), + Err(e) => EitherIter::B(std::iter::once(Err(e))), + } + } +} - fn iter(&self, col: u32, read_opts: ReadOptions) -> Self::Iterator { - self.db.iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::Start) +/// Small enum to avoid boxing iterators. +pub enum EitherIter { + A(A), + B(B), +} + +impl Iterator for EitherIter +where + A: Iterator, + B: Iterator, +{ + type Item = I; + + fn next(&mut self) -> Option { + match self { + Self::A(a) => a.next(), + Self::B(b) => b.next(), + } } +} + +/// A simple wrapper that adheres to the `kvdb` interface. 
+pub struct KvdbAdapter(T); + +impl Iterator for KvdbAdapter +where + T: Iterator, Box<[u8]>), rocksdb::Error>>, +{ + type Item = io::Result; - fn iter_with_prefix(&self, col: u32, prefix: &[u8], read_opts: ReadOptions) -> Self::Iterator { - self.db - .iterator_cf_opt(self.cf(col as usize), read_opts, IteratorMode::From(prefix, Direction::Forward)) + fn next(&mut self) -> Option { + self.0 + .next() + .map(|r| r.map_err(other_io_err).map(|(k, v)| (k.into_vec().into(), v.into()))) } } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index de18b7805..16de2a192 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -22,8 +22,7 @@ use rocksdb::{ BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Error, Options, ReadOptions, WriteBatch, WriteOptions, DB, }; -use crate::iter::KeyValuePair; -use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; +use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; use log::warn; #[cfg(target_os = "linux")] @@ -40,6 +39,10 @@ where io::Error::new(io::ErrorKind::Other, e) } +fn invalid_column(col: u32) -> io::Error { + other_io_err(format!("No such column family: {:?}", col)) +} + // Used for memory budget. 
type MiB = usize; @@ -254,11 +257,12 @@ impl MallocSizeOf for DBAndColumns { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { let mut total = self.column_names.size_of(ops) // we have at least one column always, so we can call property on it - + self.db - .property_int_value_cf(self.cf(0), "rocksdb.block-cache-usage") + + self.cf(0).map(|cf| self.db + .property_int_value_cf(cf, "rocksdb.block-cache-usage") .unwrap_or(Some(0)) .map(|x| x as usize) - .unwrap_or(0); + .unwrap_or(0) + ).unwrap_or(0); for v in 0..self.column_names.len() { total += self.static_property_or_warn(v, "rocksdb.estimate-table-readers-mem"); @@ -270,14 +274,22 @@ impl MallocSizeOf for DBAndColumns { } impl DBAndColumns { - fn cf(&self, i: usize) -> &ColumnFamily { + fn cf(&self, i: usize) -> io::Result<&ColumnFamily> { + let name = self.column_names.get(i).ok_or_else(|| invalid_column(i as u32))?; self.db - .cf_handle(&self.column_names[i]) - .expect("the specified column name is correct; qed") + .cf_handle(&name) + .ok_or_else(|| other_io_err(format!("invalid column name: {name}"))) } fn static_property_or_warn(&self, col: usize, prop: &str) -> usize { - match self.db.property_int_value_cf(self.cf(col), prop) { + let cf = match self.cf(col) { + Ok(cf) => cf, + Err(_) => { + warn!("RocksDB column index out of range: {}", col); + return 0 + }, + }; + match self.db.property_int_value_cf(cf, prop) { Ok(Some(v)) => v as usize, _ => { warn!("Cannot read expected static property of RocksDb database: {}", prop); @@ -513,7 +525,8 @@ impl Database { let mut stats_total_bytes = 0; for op in ops { - let cf = cfs.cf(op.col() as usize); + let col = op.col(); + let cf = cfs.cf(col as usize)?; match op { DBOp::Insert { col: _, key, value } => { @@ -531,13 +544,9 @@ impl Database { let end_range = end_prefix.unwrap_or_else(|| vec![u8::max_value(); 16]); batch.delete_range_cf(cf, &prefix[..], &end_range[..]); if no_end { - use crate::iter::IterationHandler as _; - let prefix = if 
prefix.len() > end_range.len() { &prefix[..] } else { &end_range[..] }; - // We call `iter_with_prefix` directly on `cfs` to avoid taking a lock twice - // See https://github.com/paritytech/parity-common/pull/396. - let read_opts = generate_read_options(); - for (key, _) in cfs.iter_with_prefix(col, prefix, read_opts) { + for result in self.iter_with_prefix(col, prefix) { + let (key, _) = result?; batch.delete_cf(cf, &key[..]); } } @@ -552,13 +561,11 @@ impl Database { /// Get value by key. pub fn get(&self, col: u32, key: &[u8]) -> io::Result> { let cfs = &self.inner; - if cfs.column_names.get(col as usize).is_none() { - return Err(other_io_err("column index is out of bounds")) - } + let cf = cfs.cf(col as usize)?; self.stats.tally_reads(1); let value = cfs .db - .get_pinned_cf_opt(cfs.cf(col as usize), key, &self.read_opts) + .get_pinned_cf_opt(cf, key, &self.read_opts) .map(|r| r.map(|v| v.to_vec())) .map_err(other_io_err); @@ -572,28 +579,31 @@ impl Database { } /// Get value by partial key. Prefix size should match configured prefix size. - pub fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { - self.iter_with_prefix(col, prefix).next().map(|(_, v)| v) + pub fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result> { + self.iter_with_prefix(col, prefix) + .next() + .transpose() + .map(|m| m.map(|(_k, v)| v)) } /// Iterator over the data in the given database column index. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. - pub fn iter<'a>(&'a self, col: u32) -> impl Iterator + 'a { + pub fn iter<'a>(&'a self, col: u32) -> impl Iterator> + 'a { let read_opts = generate_read_options(); - iter::IterationHandler::iter(&&self.inner, col, read_opts) + iter::IterationHandler::iter(&self.inner, col, read_opts) } /// Iterator over data in the `col` database column index matching the given prefix. /// Will hold a lock until the iterator is dropped /// preventing the database from being closed. 
- fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator + 'a { + fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> impl Iterator> + 'a { let mut read_opts = generate_read_options(); // rocksdb doesn't work with an empty upper bound if let Some(end_prefix) = kvdb::end_prefix(prefix) { read_opts.set_iterate_upper_bound(end_prefix); } - iter::IterationHandler::iter_with_prefix(&&self.inner, col, prefix, read_opts) + iter::IterationHandler::iter_with_prefix(&self.inner, col, prefix, read_opts) } /// The number of column families in the db. @@ -605,7 +615,7 @@ impl Database { pub fn num_keys(&self, col: u32) -> io::Result { const ESTIMATE_NUM_KEYS: &str = "rocksdb.estimate-num-keys"; let cfs = &self.inner; - let cf = cfs.cf(col as usize); + let cf = cfs.cf(col as usize)?; match cfs.db.property_int_value_cf(cf, ESTIMATE_NUM_KEYS) { Ok(estimate) => Ok(estimate.unwrap_or_default()), Err(err_string) => Err(other_io_err(err_string)), @@ -673,7 +683,7 @@ impl KeyValueDB for Database { Database::get(self, col, key) } - fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result> { Database::get_by_prefix(self, col, prefix) } @@ -681,12 +691,16 @@ impl KeyValueDB for Database { Database::write(self, transaction) } - fn iter<'a>(&'a self, col: u32) -> Box + 'a> { + fn iter<'a>(&'a self, col: u32) -> Box> + 'a> { let unboxed = Database::iter(self, col); Box::new(unboxed.into_iter()) } - fn iter_with_prefix<'a>(&'a self, col: u32, prefix: &'a [u8]) -> Box + 'a> { + fn iter_with_prefix<'a>( + &'a self, + col: u32, + prefix: &'a [u8], + ) -> Box> + 'a> { let unboxed = Database::iter_with_prefix(self, col, prefix); Box::new(unboxed.into_iter()) } diff --git a/kvdb-shared-tests/src/lib.rs b/kvdb-shared-tests/src/lib.rs index f3352faa0..391e7821c 100644 --- a/kvdb-shared-tests/src/lib.rs +++ b/kvdb-shared-tests/src/lib.rs @@ -71,7 +71,7 @@ pub fn test_iter(db: &dyn KeyValueDB) -> 
io::Result<()> { transaction.put(0, key2, key2); db.write(transaction)?; - let contents: Vec<_> = db.iter(0).into_iter().collect(); + let contents: Vec<_> = db.iter(0).into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 2); assert_eq!(&*contents[0].0, key1); assert_eq!(&*contents[0].1, key1); @@ -95,7 +95,7 @@ pub fn test_iter_with_prefix(db: &dyn KeyValueDB) -> io::Result<()> { db.write(batch)?; // empty prefix - let contents: Vec<_> = db.iter_with_prefix(0, b"").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"").into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 4); assert_eq!(&*contents[0].0, key1); assert_eq!(&*contents[1].0, key2); @@ -103,24 +103,24 @@ pub fn test_iter_with_prefix(db: &dyn KeyValueDB) -> io::Result<()> { assert_eq!(&*contents[3].0, key4); // prefix a - let contents: Vec<_> = db.iter_with_prefix(0, b"a").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"a").into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 3); assert_eq!(&*contents[0].0, key2); assert_eq!(&*contents[1].0, key3); assert_eq!(&*contents[2].0, key4); // prefix abc - let contents: Vec<_> = db.iter_with_prefix(0, b"abc").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"abc").into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 2); assert_eq!(&*contents[0].0, key3); assert_eq!(&*contents[1].0, key4); // prefix abcde - let contents: Vec<_> = db.iter_with_prefix(0, b"abcde").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"abcde").into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 0); // prefix 0 - let contents: Vec<_> = db.iter_with_prefix(0, b"0").into_iter().collect(); + let contents: Vec<_> = db.iter_with_prefix(0, b"0").into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 1); assert_eq!(&*contents[0].0, key1); Ok(()) @@ -254,7 +254,7 @@ pub fn test_complex(db: &dyn KeyValueDB) 
-> io::Result<()> { assert_eq!(&*db.get(0, key1)?.unwrap(), b"cat"); - let contents: Vec<_> = db.iter(0).into_iter().collect(); + let contents: Vec<_> = db.iter(0).into_iter().map(Result::unwrap).collect(); assert_eq!(contents.len(), 5); assert_eq!(contents[0].0.to_vec(), key1.to_vec()); assert_eq!(&*contents[0].1, b"cat"); @@ -262,9 +262,9 @@ pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { assert_eq!(&*contents[1].1, b"dog"); let mut prefix_iter = db.iter_with_prefix(0, b"04c0"); - assert_eq!(*prefix_iter.next().unwrap().1, b"caterpillar"[..]); - assert_eq!(*prefix_iter.next().unwrap().1, b"beef"[..]); - assert_eq!(*prefix_iter.next().unwrap().1, b"fish"[..]); + assert_eq!(*prefix_iter.next().unwrap().unwrap().1, b"caterpillar"[..]); + assert_eq!(*prefix_iter.next().unwrap().unwrap().1, b"beef"[..]); + assert_eq!(*prefix_iter.next().unwrap().unwrap().1, b"fish"[..]); let mut batch = db.transaction(); batch.delete(0, key1); @@ -283,8 +283,8 @@ pub fn test_complex(db: &dyn KeyValueDB) -> io::Result<()> { assert!(db.get(0, key1)?.is_none()); assert_eq!(&*db.get(0, key3)?.unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(0, key3).unwrap(), b"elephant"); - assert_eq!(&*db.get_by_prefix(0, key2).unwrap(), b"dog"); + assert_eq!(&*db.get_by_prefix(0, key3).unwrap().unwrap(), b"elephant"); + assert_eq!(&*db.get_by_prefix(0, key2).unwrap().unwrap(), b"dog"); let mut transaction = db.transaction(); transaction.put(0, key1, b"horse"); diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 3dd994f4d..05823f1bc 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -7,6 +7,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] ### Breaking - Removed `fn restore` from `KeyValueDB` trait. [662](https://github.com/paritytech/parity-common/pull/662) +- Streamlined API. 
[661](https://github.com/paritytech/parity-common/pull/661) + - `fn get_by_prefix` return type changed to `io::Result>` + - `fn has_prefix` return type changed to `io::Result` + - Iterator item changed to `io::Result` ## [0.11.0] - 2022-02-04 ### Breaking diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 437113082..155a9ef73 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -20,6 +20,8 @@ pub const PREFIX_LEN: usize = 12; pub type DBValue = Vec; /// Database keys. pub type DBKey = SmallVec<[u8; 32]>; +/// A tuple holding key and value data, used in the iterator item type. +pub type DBKeyValue = (DBKey, DBValue); pub use io_stats::{IoStats, Kind as IoStatsKind}; @@ -112,13 +114,13 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { fn get(&self, col: u32, key: &[u8]) -> io::Result>; /// Get the first value matching the given prefix. - fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option>; + fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> io::Result>; /// Write a transaction of changes to the backing store. fn write(&self, transaction: DBTransaction) -> io::Result<()>; /// Iterate over the data for a given column. - fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a>; + fn iter<'a>(&'a self, col: u32) -> Box> + 'a>; /// Iterate over the data for a given column, returning all key/value pairs /// where the key starts with the given prefix. @@ -126,12 +128,12 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { &'a self, col: u32, prefix: &'a [u8], - ) -> Box, Box<[u8]>)> + 'a>; + ) -> Box> + 'a>; /// Query statistics. /// /// Not all kvdb implementations are able or expected to implement this, so by - /// default, empty statistics is returned. Also, not all kvdb implementation + /// default, empty statistics is returned. Also, not all kvdb implementations /// can return every statistic or configured to do so (some statistics gathering /// may impede the performance and might be off by default). 
fn io_stats(&self, _kind: IoStatsKind) -> IoStats { @@ -144,8 +146,8 @@ pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { } /// Check for the existence of a value by prefix. - fn has_prefix(&self, col: u32, prefix: &[u8]) -> bool { - self.get_by_prefix(col, prefix).is_some() + fn has_prefix(&self, col: u32, prefix: &[u8]) -> io::Result { + self.get_by_prefix(col, prefix).map(|opt| opt.is_some()) } } diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index a8d73cc32..c378d11e6 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -35,11 +35,11 @@ primitive-types = { version = "0.11", path = "../primitive-types", default-featu winapi = { version = "0.3.8", features = ["heapapi"] } [target.'cfg(not(target_os = "windows"))'.dependencies.tikv-jemallocator] -version = "0.4.1" +version = "0.5.0" optional = true [target.'cfg(not(target_os = "windows"))'.dependencies.tikv-jemalloc-ctl] -version = "0.4.2" +version = "0.5.0" optional = true [features] From df78257df826669658087936578f300cbd8b85d6 Mon Sep 17 00:00:00 2001 From: Andronik Date: Tue, 16 Aug 2022 11:34:40 +0200 Subject: [PATCH 273/359] primitive-types: use weak dep feature (#664) --- primitive-types/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index e09b21215..f65c10507 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" edition = "2021" -rust-version = "1.56.1" +rust-version = "1.60.0" [dependencies] fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } @@ -19,7 +19,7 @@ scale-info-crate = { package = "scale-info", version = ">=0.9, <3", features = [ [features] default = ["std"] -std = ["uint/std", "fixed-hash/std", "impl-codec/std"] +std = 
["uint/std", "fixed-hash/std", "impl-codec?/std"] byteorder = ["fixed-hash/byteorder"] rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde", "impl-serde/std"] From 77ddc33a2773bab5d7169f5f29199182713d463d Mon Sep 17 00:00:00 2001 From: Andronik Date: Tue, 16 Aug 2022 11:34:52 +0200 Subject: [PATCH 274/359] uint: introduce abs_diff (#665) --- uint/src/uint.rs | 9 +++++++++ uint/tests/uint_tests.rs | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index da5361d2e..1b633fc79 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1103,6 +1103,15 @@ macro_rules! construct_uint { } } + /// Computes the absolute difference between self and other. + pub fn abs_diff(self, other: $name) -> $name { + if self > other { + self.overflowing_sub(other).0 + } else { + other.overflowing_sub(self).0 + } + } + /// Multiply with overflow, returning a flag if it does. #[inline(always)] pub fn overflowing_mul(self, other: $name) -> ($name, bool) { diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 026cc243e..f378423f1 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -149,6 +149,17 @@ fn uint256_checked_ops() { assert_eq!(z.checked_neg(), Some(z)); } +#[test] +fn uint256_abs_diff() { + let zero = U256::zero(); + let max = U256::MAX; + + assert_eq!(zero.abs_diff(zero), zero); + assert_eq!(max.abs_diff(max), zero); + assert_eq!(zero.abs_diff(max), max); + assert_eq!(max.abs_diff(zero), max); +} + #[test] fn uint256_from() { let e = U256([10, 0, 0, 0]); From 076ba7a90054021098c72de09fbafc1ce7b3ae95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Aug 2022 12:07:02 +0200 Subject: [PATCH 275/359] build(deps): update sysinfo requirement from 0.25.1 to 0.26.0 (#670) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. 
- [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 0b46fc215..675083b0d 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -41,6 +41,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.25.1" +sysinfo = "0.26.0" ctrlc = "3.1.4" chrono = "0.4" From 706c989b3a9b2ba93418e5af583580f536d6a7bb Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 31 Aug 2022 18:53:01 +0100 Subject: [PATCH 276/359] Support deserializing from borrowed or owned bytes, too (#668) * Support deserializing from borrowed or owned bytes, too * cargo fmt * Fix comments * H256 et al can deserialize u8 sequences, too * add changelog entry and bump version ready for release * propagate the bump Co-authored-by: Andronik --- ethbloom/Cargo.toml | 2 +- ethereum-types/Cargo.toml | 2 +- primitive-types/Cargo.toml | 2 +- primitive-types/impls/serde/CHANGELOG.md | 3 +- primitive-types/impls/serde/Cargo.toml | 2 +- primitive-types/impls/serde/src/serialize.rs | 134 ++++++++++++++++++- 6 files changed, 136 insertions(+), 9 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 95a84eebb..df2b546a0 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -14,7 +14,7 @@ rust-version = "1.56.1" tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = 
["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.3", default-features = false, optional = true } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.4", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 3449beccd..596e8364f 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -13,7 +13,7 @@ ethbloom = { path = "../ethbloom", version = "0.12", default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } primitive-types = { path = "../primitive-types", version = "0.11", features = ["byteorder", "rustc-hex"], default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.3.2", default-features = false, optional = true } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.4.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index f65c10507..5eb5ec049 100644 --- a/primitive-types/Cargo.toml 
+++ b/primitive-types/Cargo.toml @@ -11,7 +11,7 @@ rust-version = "1.60.0" [dependencies] fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } uint = { version = "0.9.0", path = "../uint", default-features = false } -impl-serde = { version = "0.3.1", path = "impls/serde", default-features = false, optional = true } +impl-serde = { version = "0.4.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index 5d143f916..43c06ddf5 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -4,7 +4,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.4.0] - 2022-08-31 +- Support deserializing H256 et al from bytes or sequences of bytes, too. [#668](https://github.com/paritytech/parity-common/pull/668) - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.3.2] - 2021-11-10 diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index f94c754ac..91915edf9 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.3.2" +version = "0.4.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index ee3a57c05..1426d3ddf 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -187,8 +187,8 @@ pub enum ExpectedLen<'a> { impl<'a> fmt::Display for ExpectedLen<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { match *self { - ExpectedLen::Exact(ref v) => write!(fmt, "length of {}", v.len() * 2), - ExpectedLen::Between(min, ref v) => write!(fmt, "length between ({}; {}]", min * 2, v.len() * 2), + ExpectedLen::Exact(ref v) => write!(fmt, "{} bytes", v.len()), + ExpectedLen::Between(min, ref v) => write!(fmt, "between ({}; {}] bytes", min, v.len()), } } } @@ -205,7 +205,7 @@ where type Value = Vec; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a (both 0x-prefixed or not) hex string") + write!(formatter, "a (both 0x-prefixed or not) hex string or byte array") } fn visit_str(self, v: &str) -> Result { @@ -215,6 +215,22 @@ where fn visit_string(self, v: String) -> Result { self.visit_str(&v) } + + fn visit_bytes(self, v: &[u8]) -> Result { + Ok(v.to_vec()) + } + + fn visit_byte_buf(self, v: Vec) -> Result { + Ok(v) + } + + fn visit_seq>(self, mut seq: A) -> Result { + let mut bytes = vec![]; + while let Some(n) = seq.next_element::()? 
{ + bytes.push(n); + } + Ok(bytes) + } } deserializer.deserialize_str(Visitor) @@ -234,7 +250,7 @@ where type Value = usize; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a (both 0x-prefixed or not) hex string with {}", self.len) + write!(formatter, "a (both 0x-prefixed or not) hex string or byte array containing {}", self.len) } fn visit_str(self, v: &str) -> Result { @@ -261,6 +277,38 @@ where fn visit_string(self, v: String) -> Result { self.visit_str(&v) } + + fn visit_bytes(self, v: &[u8]) -> Result { + let len = v.len(); + let is_len_valid = match self.len { + ExpectedLen::Exact(ref slice) => len == slice.len(), + ExpectedLen::Between(min, ref slice) => len <= slice.len() && len > min, + }; + + if !is_len_valid { + return Err(E::invalid_length(v.len(), &self)) + } + + let bytes = match self.len { + ExpectedLen::Exact(slice) => slice, + ExpectedLen::Between(_, slice) => slice, + }; + + bytes[..len].copy_from_slice(v); + Ok(len) + } + + fn visit_byte_buf(self, v: Vec) -> Result { + self.visit_bytes(&v) + } + + fn visit_seq>(self, mut seq: A) -> Result { + let mut v = vec![]; + while let Some(n) = seq.next_element::()? { + v.push(n); + } + self.visit_byte_buf(v) + } } deserializer.deserialize_str(Visitor { len }) @@ -367,4 +415,82 @@ mod tests { assert_eq!(from_hex("102"), Ok(vec![1, 2])); assert_eq!(from_hex("f"), Ok(vec![0xf])); } + + #[test] + fn should_deserialize_from_owned_bytes() { + type BytesDeserializer<'a> = serde::de::value::BytesDeserializer<'a, serde::de::value::Error>; + + // using `deserialize` to decode owned bytes. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let deserialized: Vec = deserialize(des).unwrap(); + assert_eq!(deserialized, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode owned bytes into buffer with fixed length. 
+ let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Exact(&mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 5); + assert_eq!(output, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode owned bytes into buffer with min/max length. + let des = BytesDeserializer::new(&[1, 2, 3]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Between(2, &mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 3); + assert_eq!(output, vec![1, 2, 3, 0, 0]); + } + + #[test] + fn should_deserialize_from_borrowed_bytes() { + type BytesDeserializer<'a> = serde::de::value::BorrowedBytesDeserializer<'a, serde::de::value::Error>; + + // using `deserialize` to decode borrowed bytes. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let deserialized: Vec = deserialize(des).unwrap(); + assert_eq!(deserialized, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode borrowed bytes into buffer with fixed length. + let des = BytesDeserializer::new(&[1, 2, 3, 4, 5]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Exact(&mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 5); + assert_eq!(output, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode borrowed bytes into buffer with min/max length. + let des = BytesDeserializer::new(&[1, 2, 3]); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Between(2, &mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 3); + assert_eq!(output, vec![1, 2, 3, 0, 0]); + } + + #[test] + fn should_deserialize_from_u8_sequence() { + use serde::de::value::SeqDeserializer; + + // using `deserialize` to decode a sequence of bytes. 
+ let des = SeqDeserializer::<_, serde::de::value::Error>::new([1u8, 2, 3, 4, 5].into_iter()); + let deserialized: Vec = deserialize(des).unwrap(); + assert_eq!(deserialized, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode a sequence of bytes into a buffer with fixed length. + let des = SeqDeserializer::<_, serde::de::value::Error>::new([1u8, 2, 3, 4, 5].into_iter()); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Exact(&mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 5); + assert_eq!(output, vec![1, 2, 3, 4, 5]); + + // using `deserialize` to decode a sequence of bytes into a buffer with min/max length. + let des = SeqDeserializer::<_, serde::de::value::Error>::new([1u8, 2, 3].into_iter()); + let mut output = vec![0, 0, 0, 0, 0]; + let expected_len = ExpectedLen::Between(2, &mut *output); + let n = deserialize_check_len(des, expected_len).unwrap(); + assert_eq!(n, 3); + assert_eq!(output, vec![1, 2, 3, 0, 0]); + } } From aeca7d64f4e90f1bb2c984a27a64f47b58c6216c Mon Sep 17 00:00:00 2001 From: James Wilson Date: Fri, 2 Sep 2022 19:52:21 +0100 Subject: [PATCH 277/359] Allow deserializing from newtype structs too (#672) * Allow deserializing from newtype structs too * update changelog * cargo fmt * Update changelog date Co-authored-by: Andronik Co-authored-by: Andronik --- primitive-types/impls/serde/CHANGELOG.md | 3 ++- primitive-types/impls/serde/src/serialize.rs | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index 43c06ddf5..0757363ea 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -4,8 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [0.4.0] - 2022-08-31 +## [0.4.0] - 2022-09-02 - Support deserializing H256 et al from bytes or sequences of bytes, too. 
[#668](https://github.com/paritytech/parity-common/pull/668) +- Support deserializing H256 et al from newtype structs containing anything compatible, too. [#672](https://github.com/paritytech/parity-common/pull/672) - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.3.2] - 2021-11-10 diff --git a/primitive-types/impls/serde/src/serialize.rs b/primitive-types/impls/serde/src/serialize.rs index 1426d3ddf..3017170e6 100644 --- a/primitive-types/impls/serde/src/serialize.rs +++ b/primitive-types/impls/serde/src/serialize.rs @@ -231,6 +231,10 @@ where } Ok(bytes) } + + fn visit_newtype_struct>(self, deserializer: D) -> Result { + deserializer.deserialize_bytes(self) + } } deserializer.deserialize_str(Visitor) @@ -309,6 +313,10 @@ where } self.visit_byte_buf(v) } + + fn visit_newtype_struct>(self, deserializer: D) -> Result { + deserializer.deserialize_bytes(self) + } } deserializer.deserialize_str(Visitor { len }) From f16979b15a65bd553954754779de759e627df701 Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 2 Sep 2022 20:53:25 +0200 Subject: [PATCH 278/359] Update CONTRIBUTING.md (#671) --- CONTRIBUTING.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bbe6e0c0e..961bb363b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,9 +4,7 @@ parity-common welcomes contribution from everyone in the form of suggestions, bu reports, pull requests, and feedback. This document gives some guidance if you are thinking of helping us. -Please reach out here in a GitHub issue or in the parity channel on [gitter] if we can do anything to help you contribute. - -[gitter]: https://gitter.im/paritytech/parity +Please reach out here in a GitHub issue if we can do anything to help you contribute. 
## Submitting bug reports and feature requests @@ -24,7 +22,7 @@ solve with the feature, any ideas for how parity-common could support solving th As many crates in the rust ecosystem, all crates in parity-common follow [semantic versioning]. This means bumping PATCH version on bug fixes that don't break backwards compatibility, MINOR version on new features and MAJOR version otherwise (MAJOR.MINOR.PATCH). Versions < 1.0 are considered to have the format 0.MAJOR.MINOR, which means bumping MINOR version for all non-breaking changes. -If you bump a dependency that is publicly exposed in a crate's API (e.g. `pub use dependency;` or `pub field: dependency::Dependency`) and the version transition for the dependency was semver-breaking, then it is considered to be a breaking change for the consuming crate as well. To put it simply, if your change could cause a compilation error in user's code, it is a breaking change. +For checking whether a change is SemVer-breaking, please refer to https://doc.rust-lang.org/cargo/reference/semver.html. Bumping versions should be done in a separate from regular code changes PR. 
From 9642a0e53e33c9663307e7036d01475a2326dad8 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 2 Sep 2022 22:53:41 +0400 Subject: [PATCH 279/359] Upgrade quickcheck in uint, fixed-hash (#674) * Upgrade uint:quickcheck * Upgrade fixed-hash:quickcheck * cargo fmt * Generate a probability for len of an Arbitrary in a more fancy way * Get rid of quickcheck->qc renaming * Fix %0 in arbitrary for $uint Co-authored-by: Andronik Co-authored-by: Andronik --- fixed-hash/Cargo.toml | 2 +- fixed-hash/src/hash.rs | 5 ++--- uint/Cargo.toml | 4 +--- uint/src/lib.rs | 6 +----- uint/src/uint.rs | 25 +++++++++++++++---------- uint/tests/uint_tests.rs | 2 +- 6 files changed, 21 insertions(+), 23 deletions(-) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 27ad7d217..d66dad70e 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -16,7 +16,7 @@ features = ["quickcheck", "api-dummy"] [dependencies] byteorder = { version = "1.4.2", optional = true, default-features = false } -quickcheck = { version = "0.9.0", optional = true } +quickcheck = { version = "1", optional = true } rand = { version = "0.8.0", optional = true, default-features = false } rustc-hex = { version = "2.0.1", optional = true, default-features = false } static_assertions = "1.0.0" diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 232245868..9dc356cdc 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -629,9 +629,8 @@ macro_rules! impl_quickcheck_for_fixed_hash { macro_rules! 
impl_quickcheck_for_fixed_hash { ( $name:ident ) => { impl $crate::quickcheck::Arbitrary for $name { - fn arbitrary(g: &mut G) -> Self { - let mut res = [0u8; $crate::core_::mem::size_of::()]; - g.fill_bytes(&mut res[..Self::len_bytes()]); + fn arbitrary(g: &mut $crate::quickcheck::Gen) -> Self { + let res: [u8; Self::len_bytes()] = $crate::core_::array::from_fn(|_| u8::arbitrary(g)); Self::from(res) } } diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 1de346d84..7be3e29b1 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -13,8 +13,7 @@ rust-version = "1.56.1" [dependencies] byteorder = { version = "1.4.2", default-features = false } crunchy = { version = "0.2.2", default-features = false } -qc = { package = "quickcheck", version = "0.9.0", optional = true } -rand07 = { package = "rand", version = "0.7", default-features = false, optional = true } +quickcheck = { version = "1", optional = true } hex = { version = "0.4", default-features = false } static_assertions = "1.0.0" arbitrary = { version = "1.0", optional = true } @@ -22,7 +21,6 @@ arbitrary = { version = "1.0", optional = true } [features] default = ["std"] std = ["byteorder/std", "crunchy/std", "hex/std"] -quickcheck = ["qc", "rand07"] [[example]] name = "modular" diff --git a/uint/src/lib.rs b/uint/src/lib.rs index 83ab957a2..e259c79d7 100644 --- a/uint/src/lib.rs +++ b/uint/src/lib.rs @@ -23,11 +23,7 @@ pub use hex; #[cfg(feature = "quickcheck")] #[doc(hidden)] -pub use qc; - -#[cfg(feature = "quickcheck")] -#[doc(hidden)] -pub use rand07; +pub use quickcheck; #[cfg(feature = "arbitrary")] #[doc(hidden)] diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 1b633fc79..728957dcc 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1741,28 +1741,33 @@ macro_rules! construct_uint { #[doc(hidden)] macro_rules! 
impl_quickcheck_arbitrary_for_uint { ($uint: ty, $n_bytes: tt) => { - impl $crate::qc::Arbitrary for $uint { - fn arbitrary(g: &mut G) -> Self { - let mut res = [0u8; $n_bytes]; - - use $crate::rand07::Rng; - let p: f64 = $crate::rand07::rngs::OsRng.gen(); + impl $crate::quickcheck::Arbitrary for $uint { + fn arbitrary(g: &mut $crate::quickcheck::Gen) -> Self { + let p = usize::arbitrary(g) % 100; // make it more likely to generate smaller numbers that // don't use up the full $n_bytes let range = // 10% chance to generate number that uses up to $n_bytes - if p < 0.1 { + if p < 10 { $n_bytes // 10% chance to generate number that uses up to $n_bytes / 2 - } else if p < 0.2 { + } else if p < 20 { $n_bytes / 2 // 80% chance to generate number that uses up to $n_bytes / 5 } else { $n_bytes / 5 }; - let size = g.gen_range(0, range); - g.fill_bytes(&mut res[..size]); + let range = $crate::core_::cmp::max(range, 1); + let size: usize = usize::arbitrary(g) % range; + + let res: [u8; $n_bytes] = $crate::core_::array::from_fn(|i| { + if i > size { + 0 + } else { + u8::arbitrary(g) + } + }); res.as_ref().into() } diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index f378423f1..9ef7f2e19 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1157,7 +1157,7 @@ pub mod laws { macro_rules! uint_laws { ($mod_name:ident, $uint_ty:ident) => { mod $mod_name { - use qc::{TestResult, quickcheck}; + use quickcheck::{TestResult, quickcheck}; use super::$uint_ty; quickcheck! 
{ From 7a9cc3b03c5311c24623c66fe8a750903cf34484 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Fri, 2 Sep 2022 21:53:53 +0300 Subject: [PATCH 280/359] ethereum-types: make ethbloom optional (#625) * ethereum-types: make ethbloom optional * Apply suggestions from code review Co-authored-by: Andronik --- ethereum-types/Cargo.toml | 8 ++++---- ethereum-types/src/lib.rs | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 596e8364f..20f132b6f 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -6,10 +6,10 @@ license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Ethereum types" edition = "2021" -rust-version = "1.56.1" +rust-version = "1.60.0" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.12", default-features = false } +ethbloom = { path = "../ethbloom", version = "0.12", optional = true, default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } primitive-types = { path = "../primitive-types", version = "0.11", features = ["byteorder", "rustc-hex"], default-features = false } @@ -22,8 +22,8 @@ scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = serde_json = "1.0.41" [features] -default = ["std", "rlp", "serialize"] -std = ["uint-crate/std", "fixed-hash/std", "ethbloom/std", "primitive-types/std"] +default = ["std", "ethbloom", "rlp", "serialize"] +std = ["uint-crate/std", "fixed-hash/std", "ethbloom?/std", "primitive-types/std"] serialize = ["impl-serde", "primitive-types/serde_no_std", "ethbloom/serialize"] arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary"] rlp = ["impl-rlp", "ethbloom/rlp", "primitive-types/rlp"] diff --git a/ethereum-types/src/lib.rs 
b/ethereum-types/src/lib.rs index 56963f28d..ee4dccc75 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -11,6 +11,7 @@ mod hash; mod uint; +#[cfg(feature = "ethbloom")] pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; pub use hash::{BigEndianHash, H128, H160, H256, H264, H32, H512, H520, H64}; pub use uint::{FromDecStrErr, FromStrRadixErr, FromStrRadixErrKind, U128, U256, U512, U64}; From 62aa409ac8ee3b9e5227ba81a6c9512ec0828c2e Mon Sep 17 00:00:00 2001 From: Andronik Date: Mon, 5 Sep 2022 15:11:55 +0200 Subject: [PATCH 281/359] kvdb-rocksdb: do not attepmt to repair (#667) --- kvdb-rocksdb/src/lib.rs | 52 +++-------------------------------------- 1 file changed, 3 insertions(+), 49 deletions(-) diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 16de2a192..beb137f56 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -12,14 +12,13 @@ mod stats; use std::{ cmp, collections::HashMap, - error, fs, io, + error, io, path::{Path, PathBuf}, - result, }; use parity_util_mem::MallocSizeOf; use rocksdb::{ - BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Error, Options, ReadOptions, WriteBatch, WriteOptions, DB, + BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Options, ReadOptions, WriteBatch, WriteOptions, DB, }; use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; @@ -319,24 +318,6 @@ pub struct Database { stats: stats::RunningDbStats, } -#[inline] -fn check_for_corruption>(path: P, res: result::Result) -> io::Result { - if let Err(ref s) = res { - if is_corrupted(s) { - warn!("DB corrupted: {}. 
Repair will be triggered on next restart", s); - let _ = fs::File::create(path.as_ref().join(Database::CORRUPTION_FILE_NAME)); - } - } - - res.map_err(other_io_err) -} - -fn is_corrupted(err: &Error) -> bool { - err.as_ref().starts_with("Corruption:") || - err.as_ref() - .starts_with("Invalid argument: You have to open all column families") -} - /// Generate the options for RocksDB, based on the given `DatabaseConfig`. fn generate_options(config: &DatabaseConfig) -> Options { let mut opts = Options::default(); @@ -395,8 +376,6 @@ fn generate_block_based_options(config: &DatabaseConfig) -> io::Result = (0..config.columns).map(|c| format!("col{}", c)).collect(); let write_opts = WriteOptions::default(); let read_opts = generate_read_options(); @@ -471,18 +442,6 @@ impl Database { Ok(match db { Ok(db) => db, - Err(ref s) if is_corrupted(s) => { - warn!("DB corrupted: {}, attempting repair", s); - DB::repair(&opts, path.as_ref()).map_err(other_io_err)?; - - let cf_descriptors: Vec<_> = (0..config.columns) - .map(|i| { - ColumnFamilyDescriptor::new(column_names[i as usize], config.column_config(&block_opts, i)) - }) - .collect(); - - DB::open_cf_descriptors(&opts, path, cf_descriptors).map_err(other_io_err)? - }, Err(s) => return Err(other_io_err(s)), }) } @@ -499,11 +458,6 @@ impl Database { Ok(match db { Ok(db) => db, - Err(ref s) if is_corrupted(s) => { - warn!("DB corrupted: {}, attempting repair", s); - DB::repair(&opts, path.as_ref()).map_err(other_io_err)?; - DB::open_cf_as_secondary(&opts, path, secondary_path, column_names).map_err(other_io_err)? - }, Err(s) => return Err(other_io_err(s)), }) } @@ -555,7 +509,7 @@ impl Database { } self.stats.tally_bytes_written(stats_total_bytes as u64); - check_for_corruption(&self.path, cfs.db.write_opt(batch, &self.write_opts)) + cfs.db.write_opt(batch, &self.write_opts).map_err(other_io_err) } /// Get value by key. 
From 6e5fe5013530f81c929ec8255aa80b7afc5153d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Sep 2022 11:08:22 +0200 Subject: [PATCH 282/359] build(deps): update criterion requirement from 0.3.0 to 0.4.0 (#676) Updates the requirements on [criterion](https://github.com/bheisler/criterion.rs) to permit the latest version. - [Release notes](https://github.com/bheisler/criterion.rs/releases) - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.3.0...0.4.0) --- updated-dependencies: - dependency-name: criterion dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ethbloom/Cargo.toml | 2 +- fixed-hash/Cargo.toml | 2 +- keccak-hash/Cargo.toml | 2 +- kvdb-rocksdb/Cargo.toml | 2 +- rlp/Cargo.toml | 2 +- uint/Cargo.toml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index df2b546a0..4ef5bd6b8 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -20,7 +20,7 @@ impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", defau scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] -criterion = "0.3.0" +criterion = "0.4.0" rand = "0.8.0" hex-literal = "0.3.1" diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index d66dad70e..7ce60cd58 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -24,7 +24,7 @@ arbitrary = { version = "1.0", optional = true } [dev-dependencies] rand_xorshift = "0.3.0" -criterion = "0.3.0" +criterion = "0.4.0" rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } [features] diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 8229dc88b..c5133a307 
100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -15,7 +15,7 @@ primitive-types = { path = "../primitive-types", version = "0.11", default-featu [dev-dependencies] tempfile = "3.1.0" -criterion = "0.3.0" +criterion = "0.4.0" [features] default = ["std"] diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 675083b0d..b41bd7d38 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -35,7 +35,7 @@ version = "0.19.0" [dev-dependencies] alloc_counter = "0.0.4" -criterion = "0.3" +criterion = "0.4" ethereum-types = { path = "../ethereum-types" } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } rand = "0.8.0" diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 83022e5af..c1a4ce7a3 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -14,7 +14,7 @@ rustc-hex = { version = "2.0.1", default-features = false } rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] -criterion = "0.3.0" +criterion = "0.4.0" hex-literal = "0.3.1" primitive-types = { path = "../primitive-types", version = "0.11", features = ["impl-rlp"] } diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 7be3e29b1..2f2852916 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -30,7 +30,7 @@ name = "uint_tests" required-features = ["std"] [dev-dependencies] -criterion = "0.3.0" +criterion = "0.4.0" num-bigint = "0.4.0" [target.'cfg(all(unix, target_arch = "x86_64"))'.dev-dependencies] From 4d799dc082ba0a54bb305dc218947e5828ec3143 Mon Sep 17 00:00:00 2001 From: Andronik Date: Mon, 19 Sep 2022 18:06:19 +0200 Subject: [PATCH 283/359] parity-util-mem: remove wee_alloc (#678) --- parity-util-mem/Cargo.toml | 3 --- parity-util-mem/src/allocators.rs | 5 ----- parity-util-mem/src/lib.rs | 7 ------- 3 files changed, 15 deletions(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index c378d11e6..ae772e519 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -18,7 +18,6 
@@ build = "build.rs" [dependencies] cfg-if = "1.0.0" dlmalloc = { version = "0.2.1", features = ["global"], optional = true } -wee_alloc = { version = "0.4.5", optional = true } lru = { version = "0.7", optional = true } hashbrown = { version = "0.12", optional = true } mimalloc = { version = "0.1.18", optional = true } @@ -47,8 +46,6 @@ default = ["std", "ethereum-impls", "lru", "hashbrown", "smallvec", "primitive-t std = ["parking_lot"] # use dlmalloc as global allocator dlmalloc-global = ["dlmalloc", "estimate-heapsize"] -# use wee_alloc as global allocator -weealloc-global = ["wee_alloc", "estimate-heapsize"] # use jemalloc as global allocator jemalloc-global = ["tikv-jemallocator", "tikv-jemalloc-ctl"] # use mimalloc as global allocator diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs index 2c63d55d4..576e93314 100644 --- a/parity-util-mem/src/allocators.rs +++ b/parity-util-mem/src/allocators.rs @@ -10,25 +10,21 @@ //! Features are: //! - windows: //! - no features: default implementation from servo `heapsize` crate -//! - weealloc: default to `estimate_size` //! - dlmalloc: default to `estimate_size` //! - jemalloc: default windows allocator is used instead //! - mimalloc: use mimallocator crate //! - arch x86: //! - no features: use default alloc //! - jemalloc: use tikv-jemallocator crate -//! - weealloc: default to `estimate_size` //! - dlmalloc: default to `estimate_size` //! - mimalloc: use mimallocator crate //! - arch x86/macos: //! - no features: use default alloc, requires using `estimate_size` //! - jemalloc: use tikv-jemallocator crate -//! - weealloc: default to `estimate_size` //! - dlmalloc: default to `estimate_size` //! - mimalloc: use mimallocator crate //! - arch wasm32: //! - no features: default to `estimate_size` -//! - weealloc: default to `estimate_size` //! - dlmalloc: default to `estimate_size` //! - jemalloc: compile error //! 
- mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) @@ -50,7 +46,6 @@ mod usable_size { if #[cfg(any( target_arch = "wasm32", feature = "estimate-heapsize", - feature = "weealloc-global", feature = "dlmalloc-global", ))] { diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs index 68771a2e0..4531c63f9 100644 --- a/parity-util-mem/src/lib.rs +++ b/parity-util-mem/src/lib.rs @@ -32,13 +32,6 @@ cfg_if::cfg_if! { #[global_allocator] pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; - mod memory_stats_noop; - use memory_stats_noop as memory_stats; - } else if #[cfg(feature = "weealloc-global")] { - /// Global allocator - #[global_allocator] - pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - mod memory_stats_noop; use memory_stats_noop as memory_stats; } else if #[cfg(all( From 738e7a3fc6e732d1637bced06e5ed4f6196eddcf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Sep 2022 18:07:30 +0200 Subject: [PATCH 284/359] build(deps): update lru requirement from 0.7 to 0.8 (#675) Updates the requirements on [lru](https://github.com/jeromefroe/lru-rs) to permit the latest version. - [Release notes](https://github.com/jeromefroe/lru-rs/releases) - [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/jeromefroe/lru-rs/compare/0.7.0...0.8.0) --- updated-dependencies: - dependency-name: lru dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andronik --- parity-util-mem/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index ae772e519..9032d941b 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -18,7 +18,7 @@ build = "build.rs" [dependencies] cfg-if = "1.0.0" dlmalloc = { version = "0.2.1", features = ["global"], optional = true } -lru = { version = "0.7", optional = true } +lru = { version = "0.8", optional = true } hashbrown = { version = "0.12", optional = true } mimalloc = { version = "0.1.18", optional = true } libmimalloc-sys = { version = "0.1.14", optional = true } From 6aa58bfeaa870b4f7387e50319004151443f7297 Mon Sep 17 00:00:00 2001 From: Andronik Date: Tue, 20 Sep 2022 15:03:30 +0200 Subject: [PATCH 285/359] release is coming (#680) * parity-util-mem: bump version and update changelog * prepare for more releases * update kvdb-rocksdb * moar updates * publish kvdb-shared-tests too * Back to the future * fix compilation --- ethbloom/CHANGELOG.md | 3 +++ ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 6 ++++++ ethereum-types/Cargo.toml | 8 ++++---- fixed-hash/CHANGELOG.md | 5 +++++ fixed-hash/Cargo.toml | 2 +- keccak-hash/CHANGELOG.md | 4 ++++ keccak-hash/Cargo.toml | 4 ++-- kvdb-memorydb/CHANGELOG.md | 3 +++ kvdb-memorydb/Cargo.toml | 8 ++++---- kvdb-rocksdb/CHANGELOG.md | 7 +++++-- kvdb-rocksdb/Cargo.toml | 8 ++++---- kvdb-shared-tests/CHANGELOG.md | 4 ++++ kvdb-shared-tests/Cargo.toml | 4 ++-- kvdb/CHANGELOG.md | 7 +++++-- kvdb/Cargo.toml | 4 ++-- parity-util-mem/CHANGELOG.md | 8 ++++++++ parity-util-mem/Cargo.toml | 6 +++--- primitive-types/CHANGELOG.md | 5 +++++ primitive-types/Cargo.toml | 4 ++-- rlp/Cargo.toml | 4 ++-- uint/CHANGELOG.md | 8 ++++++-- uint/Cargo.toml | 2 +- 23 files changed, 83 insertions(+), 35 
deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 7f46176fd..801efb666 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.13.0] - 2022-09-20 +- Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) + ## [0.12.1] - 2022-02-07 - Updated `scale-info` to ">=1.0, <3". [#627](https://github.com/paritytech/parity-common/pull/627) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 4ef5bd6b8..0b9044b45 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.12.1" +version = "0.13.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -13,7 +13,7 @@ rust-version = "1.56.1" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } -fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.4", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 7e1829d81..43115efa2 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,12 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.14.0] - 2022-09-20 +- Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) +- Updated `primitive-types` to 0.12. 
[#680](https://github.com/paritytech/parity-common/pull/680) +- Updated `ethbloom` to 0.13. [#680](https://github.com/paritytech/parity-common/pull/680) +- Made `ethbloom` optional. [#625](https://github.com/paritytech/parity-common/pull/625) + ## [0.13.1] - 2022-02-07 - Updated `scale-info` to ">=1.0, <3". [#627](https://github.com/paritytech/parity-common/pull/627) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 20f132b6f..68c76036c 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.13.1" +version = "0.14.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,10 +9,10 @@ edition = "2021" rust-version = "1.60.0" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.12", optional = true, default-features = false } -fixed-hash = { path = "../fixed-hash", version = "0.7", default-features = false, features = ["byteorder", "rustc-hex"] } +ethbloom = { path = "../ethbloom", version = "0.13", optional = true, default-features = false } +fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false, features = ["byteorder", "rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.11", features = ["byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.12", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.4.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } diff --git 
a/fixed-hash/CHANGELOG.md b/fixed-hash/CHANGELOG.md index 8fe709b9f..89fcb1c3d 100644 --- a/fixed-hash/CHANGELOG.md +++ b/fixed-hash/CHANGELOG.md @@ -5,7 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.8.0] - 2022-09-20 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `arbitrary` to 1.0. [#530](https://github.com/paritytech/parity-common/pull/530) +- Updated `quickcheck` to 1.0. [#674](https://github.com/paritytech/parity-common/pull/674) ## [0.7.0] - 2021-01-05 ### Breaking diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 7ce60cd58..f878d513a 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fixed-hash" -version = "0.7.0" +version = "0.8.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 7e9373f67..24d554a21 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.12.0] - 2022-09-20 +### Breaking +- Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) + ## [0.9.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index c5133a307..d5b5797a8 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.9.0" +version = "0.10.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." 
authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -11,7 +11,7 @@ rust-version = "1.56.1" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.11", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.12", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index c9b3e5c86..f90495763 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -5,8 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.12.0] - 2022-09-20 ### Breaking - Updated `kvdb` to 0.12. [662](https://github.com/paritytech/parity-common/pull/662) +- Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) ## [0.11.0] - 2022-02-04 ### Breaking diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 6f7767956..78ac0038c 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.11.0" +version = "0.12.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -9,12 +9,12 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.12", default-features = false, features = ["std"] } parking_lot = "0.12.0" -kvdb = { version = "0.11", path = "../kvdb" } +kvdb = { version = "0.12", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", 
version = "0.10" } [features] default = [] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 1143ee90a..6f289070a 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -5,9 +5,12 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- Removed `owning_ref` from dependencies :tada:. [662](https://github.com/paritytech/parity-common/pull/662) + +## [0.16.0] - 2022-09-20 +- Removed `owning_ref` from dependencies :tada:. [#662](https://github.com/paritytech/parity-common/pull/662) +- No longer attempt to repair on `open`. [#667](https://github.com/paritytech/parity-common/pull/667) ### Breaking -- Update `kvdb` to 0.12. [662](https://github.com/paritytech/parity-common/pull/662) +- Updated `kvdb` to 0.12. [#662](https://github.com/paritytech/parity-common/pull/662) - `add_column` and `remove_last_column` now require `&mut self` ## [0.15.2] - 2022-03-20 diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index b41bd7d38..3337fdfa8 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.15.2" +version = "0.16.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,12 +14,12 @@ harness = false [dependencies] smallvec = "1.0.0" -kvdb = { path = "../kvdb", version = "0.11" } +kvdb = { path = "../kvdb", version = "0.12" } log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" -parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false, features = ["std", "smallvec"] } +parity-util-mem = { path = "../parity-util-mem", version = "0.12", default-features = false, features = ["std", "smallvec"] } # OpenBSD and MSVC are unteested and shouldn't enable jemalloc: # 
https://github.com/tikv/jemallocator/blob/52de4257fab3e770f73d5174c12a095b49572fba/jemalloc-sys/build.rs#L26-L27 @@ -37,7 +37,7 @@ version = "0.19.0" alloc_counter = "0.0.4" criterion = "0.4" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.9" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.10" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md index 197bfd68f..6085d10ad 100644 --- a/kvdb-shared-tests/CHANGELOG.md +++ b/kvdb-shared-tests/CHANGELOG.md @@ -5,7 +5,11 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.10.0] - 2022-09-20 +### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `kvdb` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) ### Breaking - Updated `kvdb` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 1ab98c5ef..2766290f9 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-shared-tests" -version = "0.9.0" +version = "0.10.0" authors = ["Parity Technologies "] edition = "2021" rust-version = "1.56.1" @@ -8,4 +8,4 @@ description = "Shared tests for kvdb functionality, to be executed against actua license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.11" } +kvdb = { path = "../kvdb", version = "0.12" } diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index 05823f1bc..d6e7b9c05 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -5,12 +5,15 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.12.0] - 2022-09-20 ### Breaking -- Removed `fn restore` from `KeyValueDB` trait. [662](https://github.com/paritytech/parity-common/pull/662) -- Streamlined API. [661](https://github.com/paritytech/parity-common/pull/661) +- Removed `fn restore` from `KeyValueDB` trait. [#662](https://github.com/paritytech/parity-common/pull/662) +- Streamlined API. [#661](https://github.com/paritytech/parity-common/pull/661) - `fn get_by_prefix` return type changed to `io::Result>` - `fn has_prefix` return type changed to `io::Result` - Iterator item changed to `io::Result` +- Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) ## [0.11.0] - 2022-02-04 ### Breaking diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 4a805e026..552d0149e 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.11.0" +version = "0.12.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" @@ -10,4 +10,4 @@ rust-version = "1.56.1" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.11", default-features = false } +parity-util-mem = { path = "../parity-util-mem", version = "0.12", default-features = false } diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md index 4f0d1fa87..f7b73a4ee 100644 --- a/parity-util-mem/CHANGELOG.md +++ b/parity-util-mem/CHANGELOG.md @@ -6,6 +6,14 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.12.0] - 2022-09-20 +### Breaking +- Updated `tikv-jemallocator` to 0.5. [#661](https://github.com/paritytech/parity-common/pull/661) +- Updated `lru` to 0.8. [#675](https://github.com/paritytech/parity-common/pull/675) +- Removed `weealloc-global` feature. 
[#678](https://github.com/paritytech/parity-common/pull/678) +- Updated `primitive-types` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) +- Updated `ethereum-types` to 0.14. [#680](https://github.com/paritytech/parity-common/pull/680) + ## [0.11.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml index 9032d941b..4e9e8ccfa 100644 --- a/parity-util-mem/Cargo.toml +++ b/parity-util-mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "parity-util-mem" -version = "0.11.0" +version = "0.12.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Collection of memory related utilities" @@ -26,9 +26,9 @@ parity-util-mem-derive = { path = "derive", version = "0.1" } impl-trait-for-tuples = "0.2.0" smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.13.0", optional = true, path = "../ethereum-types" } +ethereum-types = { version = "0.14.0", optional = true, path = "../ethereum-types" } parking_lot = { version = "0.12.0", optional = true } -primitive-types = { version = "0.11", path = "../primitive-types", default-features = false, optional = true } +primitive-types = { version = "0.12", path = "../primitive-types", default-features = false, optional = true } [target.'cfg(target_os = "windows")'.dependencies] winapi = { version = "0.3.8", features = ["heapapi"] } diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index e02524e30..fdaa89e19 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,11 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.12.0] - 2022-09-20 +### Breaking +- Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) +- Uses weak-dependency feature of cargo. 
[#664](https://github.com/paritytech/parity-common/pull/664) + ## [0.11.1] - 2022-02-07 - Updated `scale-info` to ">=0.9, <3". [#627](https://github.com/paritytech/parity-common/pull/627) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 5eb5ec049..bfbc9c490 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.11.1" +version = "0.12.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.60.0" [dependencies] -fixed-hash = { version = "0.7", path = "../fixed-hash", default-features = false } +fixed-hash = { version = "0.8", path = "../fixed-hash", default-features = false } uint = { version = "0.9.0", path = "../uint", default-features = false } impl-serde = { version = "0.4.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index c1a4ce7a3..234e7dd74 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -11,12 +11,12 @@ rust-version = "1.56.1" [dependencies] bytes = { version = "1", default-features = false } rustc-hex = { version = "2.0.1", default-features = false } -rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } +rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] criterion = "0.4.0" hex-literal = "0.3.1" -primitive-types = { path = "../primitive-types", version = "0.11", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.12", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 359b6e443..8e4690719 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,8 +5,12 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -- Make `one` const. [#650](https://github.com/paritytech/parity-common/pull/650) -- Make `max_value` const. [#652](https://github.com/paritytech/parity-common/pull/652) + +## [0.9.4] - 2022-09-20 +- Made `one` const. [#650](https://github.com/paritytech/parity-common/pull/650) +- Made `max_value` const. [#652](https://github.com/paritytech/parity-common/pull/652) +- Made `is_zero` const. [#639](https://github.com/paritytech/parity-common/pull/639) +- Added `abs_diff`. [#665](https://github.com/paritytech/parity-common/pull/665) ## [0.9.3] - 2022-02-04 - Simplified and faster `div_mod`. [#478](https://github.com/paritytech/parity-common/pull/478) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 2f2852916..0e4ed8aea 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.9.3" +version = "0.9.4" authors = ["Parity Technologies "] readme = "README.md" edition = "2021" From baebea7c3c0f90d825b7024a5ffe97870f819317 Mon Sep 17 00:00:00 2001 From: Artem Vorotnikov Date: Fri, 30 Sep 2022 20:37:40 +0500 Subject: [PATCH 286/359] Add if_ethbloom conditional macro (#682) --- ethereum-types/src/lib.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ethereum-types/src/lib.rs b/ethereum-types/src/lib.rs index ee4dccc75..f59a61a11 100644 --- a/ethereum-types/src/lib.rs +++ b/ethereum-types/src/lib.rs @@ -20,3 +20,19 @@ pub type Address = H160; pub type Secret = H256; pub type Public = H512; pub type Signature = H520; + +/// Conditional compilation depending on whether ethereum-types is built with ethbloom support. +#[cfg(feature = "ethbloom")] +#[macro_export] +macro_rules! if_ethbloom { + ($($tt:tt)*) => { + $($tt)* + }; +} + +#[cfg(not(feature = "ethbloom"))] +#[macro_export] +#[doc(hidden)] +macro_rules! 
if_ethbloom { + ($($tt:tt)*) => {}; +} From 7b10e2b1eefac7db9715b9edcaa39ed8bf966dbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Oct 2022 11:17:28 +0200 Subject: [PATCH 287/359] build(deps): bump Swatinem/rust-cache from 2.0.0 to 2.0.1 (#683) * build(deps): bump Swatinem/rust-cache from 2.0.0 to 2.0.1 Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.0.0 to 2.0.1. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/v2.0.0...v2.0.1) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * add hash Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sergejs Kostjucenko --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6a8de07f..577c1a42e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v2.0.0 + uses: Swatinem/rust-cache@22c9328bcba27aa81a32b1bef27c7e3c78052531 # v2.0.1 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v2.0.0 + uses: Swatinem/rust-cache@22c9328bcba27aa81a32b1bef27c7e3c78052531 # v2.0.1 - run: rustup target add wasm32-unknown-unknown @@ -138,7 +138,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@v2.0.0 + uses: Swatinem/rust-cache@22c9328bcba27aa81a32b1bef27c7e3c78052531 # v2.0.1 - uses: actions-rs/cargo@v1 with: From 3c807b51efdf2e8959b803f41f62eb6788766ac8 Mon Sep 17 00:00:00 2001 From: Andronik Date: Fri, 
21 Oct 2022 11:53:37 +0200 Subject: [PATCH 288/359] rlp: release 0.5.2 (#686) * rlp: release 0.5.2 * Revert "rlp: release 0.5.2" This reverts commit 86676b08e89cb99fdff0c882453ad0746dd070cf. * rlp: release 0.5.2 without 2021 edition changes --- rlp/CHANGELOG.md | 2 ++ rlp/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 072ace3f6..0d5e343ac 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -6,6 +6,8 @@ The format is based on [Keep a Changelog]. ## [Unreleased] - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) + +## [0.5.2] - 2022-10-21 - Add optional `derive` feature. [#613](https://github.com/paritytech/parity-common/pull/613) ## [0.5.1] - 2021-07-30 diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 234e7dd74..c61433ecd 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.5.1" +version = "0.5.2" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" From 26c1423bacac3c248c45ea058cdee213d23adfb3 Mon Sep 17 00:00:00 2001 From: drskalman <35698397+drskalman@users.noreply.github.com> Date: Thu, 27 Oct 2022 09:37:38 +0000 Subject: [PATCH 289/359] add `H384` and `H768` hash types (#684) * add H384 and H768 hash types * implement `impl-serde`, `impl-codec`, `impl-rlp` for `H384` and `H768` types. * remove redundant double impl of serde for H384 --- primitive-types/src/lib.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index b2f019802..dd372a9eb 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -62,11 +62,21 @@ construct_fixed_hash! { #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H256(32); } +construct_fixed_hash! 
{ + /// Fixed-size uninterpreted hash type with 48 bytes (384 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] + pub struct H384(48); +} construct_fixed_hash! { /// Fixed-size uninterpreted hash type with 64 bytes (512 bits) size. #[cfg_attr(feature = "scale-info", derive(TypeInfo))] pub struct H512(64); } +construct_fixed_hash! { + /// Fixed-size uninterpreted hash type with 96 bytes (768 bits) size. + #[cfg_attr(feature = "scale-info", derive(TypeInfo))] + pub struct H768(96); +} #[cfg(feature = "num-traits")] mod num_traits { @@ -90,7 +100,9 @@ mod serde { impl_fixed_hash_serde!(H128, 16); impl_fixed_hash_serde!(H160, 20); impl_fixed_hash_serde!(H256, 32); + impl_fixed_hash_serde!(H384, 48); impl_fixed_hash_serde!(H512, 64); + impl_fixed_hash_serde!(H768, 96); } #[cfg(feature = "impl-codec")] @@ -105,7 +117,9 @@ mod codec { impl_fixed_hash_codec!(H128, 16); impl_fixed_hash_codec!(H160, 20); impl_fixed_hash_codec!(H256, 32); + impl_fixed_hash_codec!(H384, 48); impl_fixed_hash_codec!(H512, 64); + impl_fixed_hash_codec!(H768, 96); } #[cfg(feature = "impl-rlp")] @@ -120,7 +134,9 @@ mod rlp { impl_fixed_hash_rlp!(H128, 16); impl_fixed_hash_rlp!(H160, 20); impl_fixed_hash_rlp!(H256, 32); + impl_fixed_hash_rlp!(H384, 48); impl_fixed_hash_rlp!(H512, 64); + impl_fixed_hash_rlp!(H768, 96); } impl_fixed_hash_conversions!(H256, H160); From 09371a1c63e315c9c390a9c761f1863a5b97be47 Mon Sep 17 00:00:00 2001 From: Andronik Date: Thu, 27 Oct 2022 16:12:01 +0200 Subject: [PATCH 290/359] primitive-types: release 0.12.1 (#688) --- primitive-types/CHANGELOG.md | 3 +++ primitive-types/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index fdaa89e19..e33e0cd5f 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.12.1] - 2022-10-27 +- Added `H384` and `H768` types. 
[#684](https://github.com/paritytech/parity-common/pull/684) + ## [0.12.0] - 2022-09-20 ### Breaking - Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index bfbc9c490..ea159ade9 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.12.0" +version = "0.12.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" From 0373dec4e658819051b22a4e7f0acd8603d066e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Nov 2022 17:09:35 +0100 Subject: [PATCH 291/359] build(deps): bump Swatinem/rust-cache from 2.0.1 to 2.0.2 (#689) Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.0.1 to 2.0.2. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/22c9328bcba27aa81a32b1bef27c7e3c78052531...b5ec9edd911d3bf82c74038b0a28791e0aa24d6f) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 577c1a42e..5551c2908 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@22c9328bcba27aa81a32b1bef27c7e3c78052531 # v2.0.1 + uses: Swatinem/rust-cache@b5ec9edd911d3bf82c74038b0a28791e0aa24d6f # v2.0.2 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@22c9328bcba27aa81a32b1bef27c7e3c78052531 # v2.0.1 + uses: Swatinem/rust-cache@b5ec9edd911d3bf82c74038b0a28791e0aa24d6f # v2.0.2 - run: rustup target add wasm32-unknown-unknown @@ -138,7 +138,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@22c9328bcba27aa81a32b1bef27c7e3c78052531 # v2.0.1 + uses: Swatinem/rust-cache@b5ec9edd911d3bf82c74038b0a28791e0aa24d6f # v2.0.2 - uses: actions-rs/cargo@v1 with: From 82b21476e7fbcbb83dbb0181bab46c88735b3e7c Mon Sep 17 00:00:00 2001 From: halo3mic <46010359+halo3mic@users.noreply.github.com> Date: Mon, 7 Nov 2022 15:37:16 +0100 Subject: [PATCH 292/359] Uint: Add support for bit and,or,xor assign traits (#690) * Uint: Add support for bit and,or,xor assign traits * Add tests for bit-assign * satisfy rustfmt --- uint/src/uint.rs | 18 ++++++++++++++++++ uint/tests/uint_tests.rs | 41 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 728957dcc..2366543f4 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -1508,6 +1508,12 @@ macro_rules! 
construct_uint { } } + impl $crate::core_::ops::BitAndAssign<$name> for $name { + fn bitand_assign(&mut self, rhs: $name) { + *self = *self & rhs; + } + } + impl $crate::core_::ops::BitXor<$name> for $name { type Output = $name; @@ -1523,6 +1529,12 @@ macro_rules! construct_uint { } } + impl $crate::core_::ops::BitXorAssign<$name> for $name { + fn bitxor_assign(&mut self, rhs: $name) { + *self = *self ^ rhs; + } + } + impl $crate::core_::ops::BitOr<$name> for $name { type Output = $name; @@ -1538,6 +1550,12 @@ macro_rules! construct_uint { } } + impl $crate::core_::ops::BitOrAssign<$name> for $name { + fn bitor_assign(&mut self, rhs: $name) { + *self = *self | rhs; + } + } + impl $crate::core_::ops::Not for $name { type Output = $name; diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index 9ef7f2e19..a830e488e 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -1151,6 +1151,47 @@ fn trailing_zeros() { assert_eq!(U256::from("0000000000000000000000000000000000000000000000000000000000000000").trailing_zeros(), 256); } +#[test] +fn bit_assign() { + fn check(a: U256, b: U256) { + // and + { + let mut x = a; + x &= b; + assert_eq!(x, a & b); + } + // or + { + let mut x = a; + x |= b; + assert_eq!(x, a | b); + } + // xor + { + let mut x = a; + x ^= b; + assert_eq!(x, a ^ b); + } + // shr + { + let mut x = a; + x >>= b; + assert_eq!(x, a >> b); + } + // shl + { + let mut x = a; + x <<= b; + assert_eq!(x, a << b); + } + } + + check(U256::from(9), U256::from(999999)); + check(U256::from(0), U256::from(0)); + check(U256::from(23432), U256::from(u32::MAX)); + check(U256::MAX, U256::zero()); +} + #[cfg(feature = "quickcheck")] pub mod laws { use super::construct_uint; From bb5c7f19cc5cb65898871a072d680e857015e7e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Nov 2022 09:52:11 +0100 Subject: [PATCH 293/359] build(deps): bump Swatinem/rust-cache from 2.0.2 to 2.1.0 (#692) 
Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.0.2 to 2.1.0. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/b5ec9edd911d3bf82c74038b0a28791e0aa24d6f...b894d59a8d236e2979b247b80dac8d053ab340dd) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5551c2908..a9f91b74e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@b5ec9edd911d3bf82c74038b0a28791e0aa24d6f # v2.0.2 + uses: Swatinem/rust-cache@b894d59a8d236e2979b247b80dac8d053ab340dd # v2.1.0 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@b5ec9edd911d3bf82c74038b0a28791e0aa24d6f # v2.0.2 + uses: Swatinem/rust-cache@b894d59a8d236e2979b247b80dac8d053ab340dd # v2.1.0 - run: rustup target add wasm32-unknown-unknown @@ -138,7 +138,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@b5ec9edd911d3bf82c74038b0a28791e0aa24d6f # v2.0.2 + uses: Swatinem/rust-cache@b894d59a8d236e2979b247b80dac8d053ab340dd # v2.1.0 - uses: actions-rs/cargo@v1 with: From fc75eaad1bb6f1cc40d6b7aa6a3b9093a841eddc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Nov 2022 11:54:25 +0100 Subject: [PATCH 294/359] build(deps): bump Swatinem/rust-cache from 2.1.0 to 2.2.0 (#694) Bumps 
[Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.1.0 to 2.2.0. - [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/b894d59a8d236e2979b247b80dac8d053ab340dd...359a70e43a0bb8a13953b04a90f76428b4959bb6) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a9f91b74e..6af8dc363 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@b894d59a8d236e2979b247b80dac8d053ab340dd # v2.1.0 + uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@b894d59a8d236e2979b247b80dac8d053ab340dd # v2.1.0 + uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 - run: rustup target add wasm32-unknown-unknown @@ -138,7 +138,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@b894d59a8d236e2979b247b80dac8d053ab340dd # v2.1.0 + uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 - uses: actions-rs/cargo@v1 with: From 806ca48a95c06014e0fde74a1382dc128da62206 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 29 Nov 2022 09:53:58 -0500 Subject: [PATCH 295/359] Remove `parity-util-mem` (#696) * Remove `parity-util-mem` * Remove reference to `parity-util-mem` from CI --- .github/workflows/ci.yml | 20 -------------------- 
kvdb-memorydb/Cargo.toml | 1 - kvdb-memorydb/src/lib.rs | 3 +-- kvdb-rocksdb/Cargo.toml | 1 - kvdb-rocksdb/examples/memtest.rs | 1 - kvdb-rocksdb/src/lib.rs | 29 ----------------------------- kvdb/Cargo.toml | 1 - kvdb/src/lib.rs | 2 +- 8 files changed, 2 insertions(+), 56 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6af8dc363..1fa9c2746 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -106,26 +106,6 @@ jobs: command: test args: -p uint --target=mips64-unknown-linux-gnuabi64 - - name: Test parity-util-mem on Android - if: runner.os == 'Linux' - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: -p parity-util-mem --target=aarch64-linux-android - - - name: Test parity-util-mem estimate-heapsize - run: cargo test -p parity-util-mem --features='estimate-heapsize' - - - name: Test parity-util-mem jemalloc-global - run: cargo test -p parity-util-mem --features='jemalloc-global' - - - name: Test parity-util-mem mimalloc-global - run: cargo test -p parity-util-mem --features='mimalloc-global' - - - name: Test parity-util-mem dlmalloc-global - run: cargo test -p parity-util-mem --no-default-features --features='dlmalloc-global' - test_windows: name: Test Windows runs-on: windows-latest diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index 78ac0038c..ca52d8a1b 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -9,7 +9,6 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -parity-util-mem = { path = "../parity-util-mem", version = "0.12", default-features = false, features = ["std"] } parking_lot = "0.12.0" kvdb = { version = "0.12", path = "../kvdb" } diff --git a/kvdb-memorydb/src/lib.rs b/kvdb-memorydb/src/lib.rs index a6de728e2..67773b1ac 100644 --- a/kvdb-memorydb/src/lib.rs +++ b/kvdb-memorydb/src/lib.rs @@ -7,7 +7,6 @@ // except according to those terms. 
use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; -use parity_util_mem::MallocSizeOf; use parking_lot::RwLock; use std::{ collections::{BTreeMap, HashMap}, @@ -16,7 +15,7 @@ use std::{ /// A key-value database fulfilling the `KeyValueDB` trait, living in memory. /// This is generally intended for tests and is not particularly optimized. -#[derive(Default, MallocSizeOf)] +#[derive(Default)] pub struct InMemory { columns: RwLock, DBValue>>>, } diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 3337fdfa8..a932b34ad 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -19,7 +19,6 @@ log = "0.4.8" num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" -parity-util-mem = { path = "../parity-util-mem", version = "0.12", default-features = false, features = ["std", "smallvec"] } # OpenBSD and MSVC are unteested and shouldn't enable jemalloc: # https://github.com/tikv/jemallocator/blob/52de4257fab3e770f73d5174c12a095b49572fba/jemalloc-sys/build.rs#L26-L27 diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index 30923fb40..e41521bd5 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -145,7 +145,6 @@ fn main() { println!("{}", timestamp); println!("\tData written: {} keys - {} Mb", step + 1, ((step + 1) * 64 * 128) / 1024 / 1024); println!("\tProcess memory used as seen by the OS: {} Mb", proc_memory_usage() / 1024); - println!("\tMemory used as reported by rocksdb: {} Mb\n", parity_util_mem::malloc_size(&db) / 1024 / 1024); } step += 1; diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index beb137f56..487cd578b 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -16,7 +16,6 @@ use std::{ path::{Path, PathBuf}, }; -use parity_util_mem::MallocSizeOf; use rocksdb::{ BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Options, ReadOptions, WriteBatch, WriteOptions, DB, }; @@ -252,26 +251,6 @@ struct DBAndColumns { 
column_names: Vec, } -impl MallocSizeOf for DBAndColumns { - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - let mut total = self.column_names.size_of(ops) - // we have at least one column always, so we can call property on it - + self.cf(0).map(|cf| self.db - .property_int_value_cf(cf, "rocksdb.block-cache-usage") - .unwrap_or(Some(0)) - .map(|x| x as usize) - .unwrap_or(0) - ).unwrap_or(0); - - for v in 0..self.column_names.len() { - total += self.static_property_or_warn(v, "rocksdb.estimate-table-readers-mem"); - total += self.static_property_or_warn(v, "rocksdb.cur-size-all-mem-tables"); - } - - total - } -} - impl DBAndColumns { fn cf(&self, i: usize) -> io::Result<&ColumnFamily> { let name = self.column_names.get(i).ok_or_else(|| invalid_column(i as u32))?; @@ -299,22 +278,14 @@ impl DBAndColumns { } /// Key-Value database. -#[derive(MallocSizeOf)] pub struct Database { inner: DBAndColumns, - #[ignore_malloc_size_of = "insignificant"] config: DatabaseConfig, - #[ignore_malloc_size_of = "insignificant"] path: PathBuf, - #[ignore_malloc_size_of = "insignificant"] opts: Options, - #[ignore_malloc_size_of = "insignificant"] write_opts: WriteOptions, - #[ignore_malloc_size_of = "insignificant"] read_opts: ReadOptions, - #[ignore_malloc_size_of = "insignificant"] block_opts: BlockBasedOptions, - #[ignore_malloc_size_of = "insignificant"] stats: stats::RunningDbStats, } diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index 552d0149e..a926dffcb 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -10,4 +10,3 @@ rust-version = "1.56.1" [dependencies] smallvec = "1.0.0" -parity-util-mem = { path = "../parity-util-mem", version = "0.12", default-features = false } diff --git a/kvdb/src/lib.rs b/kvdb/src/lib.rs index 155a9ef73..f44461cf0 100644 --- a/kvdb/src/lib.rs +++ b/kvdb/src/lib.rs @@ -104,7 +104,7 @@ impl DBTransaction { /// /// The API laid out here, along with the `Sync` bound implies interior synchronization for /// implementation. 
-pub trait KeyValueDB: Sync + Send + parity_util_mem::MallocSizeOf { +pub trait KeyValueDB: Sync + Send { /// Helper to create a new transaction. fn transaction(&self) -> DBTransaction { DBTransaction::new() From c41d51df8a314150c46cbbff31f8140a35dfb02c Mon Sep 17 00:00:00 2001 From: ordian Date: Tue, 29 Nov 2022 18:18:24 +0100 Subject: [PATCH 296/359] release is coming (#697) * update ethereum-types and uint changelogs * purge parity-util-mem * cargo set-version --bump minor -p kvdb * cargo set-version --bump patch -p uint * cargo set-version --bump patch -p ethereum-types * kvdb-rocksdb: rm unused path field * kvdb-rocksdb: rm unused method * update kvdb&co * rm extra newline --- Cargo.toml | 2 - ethereum-types/CHANGELOG.md | 3 + ethereum-types/Cargo.toml | 2 +- kvdb-memorydb/CHANGELOG.md | 3 + kvdb-memorydb/Cargo.toml | 6 +- kvdb-rocksdb/CHANGELOG.md | 3 + kvdb-rocksdb/Cargo.toml | 7 +- kvdb-rocksdb/src/lib.rs | 47 - kvdb-shared-tests/CHANGELOG.md | 3 + kvdb-shared-tests/Cargo.toml | 4 +- kvdb/CHANGELOG.md | 4 + kvdb/Cargo.toml | 2 +- parity-util-mem/CHANGELOG.md | 92 -- parity-util-mem/Cargo.toml | 56 -- parity-util-mem/README.md | 31 - parity-util-mem/build.rs | 1 - parity-util-mem/derive/CHANGELOG.md | 8 - parity-util-mem/derive/Cargo.toml | 18 - parity-util-mem/derive/lib.rs | 77 -- parity-util-mem/src/allocators.rs | 147 --- parity-util-mem/src/ethereum_impls.rs | 14 - parity-util-mem/src/lib.rs | 150 --- parity-util-mem/src/malloc_size.rs | 911 ------------------- parity-util-mem/src/memory_stats_jemalloc.rs | 32 - parity-util-mem/src/memory_stats_noop.rs | 31 - parity-util-mem/src/primitives_impls.rs | 26 - parity-util-mem/src/sizeof.rs | 51 -- parity-util-mem/tests/derive.rs | 85 -- primitive-types/Cargo.toml | 2 +- primitive-types/impls/num-traits/Cargo.toml | 2 +- primitive-types/impls/serde/Cargo.toml | 2 +- uint/CHANGELOG.md | 3 + uint/Cargo.toml | 2 +- 33 files changed, 33 insertions(+), 1794 deletions(-) delete mode 100644 
parity-util-mem/CHANGELOG.md delete mode 100644 parity-util-mem/Cargo.toml delete mode 100644 parity-util-mem/README.md delete mode 100644 parity-util-mem/build.rs delete mode 100644 parity-util-mem/derive/CHANGELOG.md delete mode 100644 parity-util-mem/derive/Cargo.toml delete mode 100644 parity-util-mem/derive/lib.rs delete mode 100644 parity-util-mem/src/allocators.rs delete mode 100644 parity-util-mem/src/ethereum_impls.rs delete mode 100644 parity-util-mem/src/lib.rs delete mode 100644 parity-util-mem/src/malloc_size.rs delete mode 100644 parity-util-mem/src/memory_stats_jemalloc.rs delete mode 100644 parity-util-mem/src/memory_stats_noop.rs delete mode 100644 parity-util-mem/src/primitives_impls.rs delete mode 100644 parity-util-mem/src/sizeof.rs delete mode 100644 parity-util-mem/tests/derive.rs diff --git a/Cargo.toml b/Cargo.toml index fcd164017..60011793a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,9 +10,7 @@ members = [ "rlp", "rlp-derive", "uint", - "parity-util-mem", "primitive-types", "ethereum-types", "ethbloom", - "parity-util-mem/derive" ] diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 43115efa2..1fc54e5e6 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.14.1] - 2022-11-29 +- Added `if_ethbloom` conditional macro. [#682](https://github.com/paritytech/parity-common/pull/682) + ## [0.14.0] - 2022-09-20 - Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) - Updated `primitive-types` to 0.12. 
[#680](https://github.com/paritytech/parity-common/pull/680) diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 68c76036c..b5a5302ec 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.14.0" +version = "0.14.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/kvdb-memorydb/CHANGELOG.md b/kvdb-memorydb/CHANGELOG.md index f90495763..c8f3c12fe 100644 --- a/kvdb-memorydb/CHANGELOG.md +++ b/kvdb-memorydb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.13.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + ## [0.12.0] - 2022-09-20 ### Breaking - Updated `kvdb` to 0.12. [662](https://github.com/paritytech/parity-common/pull/662) diff --git a/kvdb-memorydb/Cargo.toml b/kvdb-memorydb/Cargo.toml index ca52d8a1b..3b82d5fda 100644 --- a/kvdb-memorydb/Cargo.toml +++ b/kvdb-memorydb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-memorydb" -version = "0.12.0" +version = "0.13.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "A key-value in-memory database that implements the `KeyValueDB` trait" @@ -10,10 +10,10 @@ rust-version = "1.56.1" [dependencies] parking_lot = "0.12.0" -kvdb = { version = "0.12", path = "../kvdb" } +kvdb = { version = "0.13", path = "../kvdb" } [dev-dependencies] -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.10" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } [features] default = [] diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 6f289070a..32bbd3810 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. 
## [Unreleased] +## [0.17.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + ## [0.16.0] - 2022-09-20 - Removed `owning_ref` from dependencies :tada:. [#662](https://github.com/paritytech/parity-common/pull/662) - No longer attempt to repair on `open`. [#667](https://github.com/paritytech/parity-common/pull/667) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index a932b34ad..5e920fee5 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.16.0" +version = "0.17.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -14,8 +14,7 @@ harness = false [dependencies] smallvec = "1.0.0" -kvdb = { path = "../kvdb", version = "0.12" } -log = "0.4.8" +kvdb = { path = "../kvdb", version = "0.13" } num_cpus = "1.10.1" parking_lot = "0.12.0" regex = "1.3.1" @@ -36,7 +35,7 @@ version = "0.19.0" alloc_counter = "0.0.4" criterion = "0.4" ethereum-types = { path = "../ethereum-types" } -kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.10" } +kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 487cd578b..5c231413d 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -21,7 +21,6 @@ use rocksdb::{ }; use kvdb::{DBKeyValue, DBOp, DBTransaction, DBValue, KeyValueDB}; -use log::warn; #[cfg(target_os = "linux")] use regex::Regex; @@ -258,30 +257,12 @@ impl DBAndColumns { .cf_handle(&name) .ok_or_else(|| other_io_err(format!("invalid column name: {name}"))) } - - fn static_property_or_warn(&self, col: usize, prop: &str) -> usize { - let cf = match self.cf(col) { - Ok(cf) => cf, - Err(_) => { - warn!("RocksDB column index out of range: {}", col); 
- return 0 - }, - }; - match self.db.property_int_value_cf(cf, prop) { - Ok(Some(v)) => v as usize, - _ => { - warn!("Cannot read expected static property of RocksDb database: {}", prop); - 0 - }, - } - } } /// Key-Value database. pub struct Database { inner: DBAndColumns, config: DatabaseConfig, - path: PathBuf, opts: Options, write_opts: WriteOptions, read_opts: ReadOptions, @@ -372,7 +353,6 @@ impl Database { Ok(Database { inner: DBAndColumns { db, column_names }, config: config.clone(), - path: path.as_ref().to_owned(), opts, read_opts, write_opts, @@ -757,33 +737,6 @@ mod tests { Ok(()) } - #[test] - fn mem_tables_size() { - let tempdir = TempfileBuilder::new().prefix("").tempdir().unwrap(); - - let config = DatabaseConfig { - max_open_files: 512, - memory_budget: HashMap::new(), - compaction: CompactionProfile::default(), - columns: 11, - keep_log_file_num: 1, - enable_statistics: false, - secondary: None, - max_total_wal_size: None, - create_if_missing: true, - }; - - let db = Database::open(&config, tempdir.path().to_str().unwrap()).unwrap(); - - let mut batch = db.transaction(); - for i in 0u32..10000u32 { - batch.put(i / 1000 + 1, &i.to_le_bytes(), &(i * 17).to_le_bytes()); - } - db.write(batch).unwrap(); - - assert!(db.inner.static_property_or_warn(0, "rocksdb.cur-size-all-mem-tables") > 512); - } - #[test] #[cfg(target_os = "linux")] fn df_to_rotational() { diff --git a/kvdb-shared-tests/CHANGELOG.md b/kvdb-shared-tests/CHANGELOG.md index 6085d10ad..767ffbcd9 100644 --- a/kvdb-shared-tests/CHANGELOG.md +++ b/kvdb-shared-tests/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.11.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + ## [0.10.0] - 2022-09-20 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/kvdb-shared-tests/Cargo.toml b/kvdb-shared-tests/Cargo.toml index 2766290f9..6da808795 100644 --- a/kvdb-shared-tests/Cargo.toml +++ b/kvdb-shared-tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-shared-tests" -version = "0.10.0" +version = "0.11.0" authors = ["Parity Technologies "] edition = "2021" rust-version = "1.56.1" @@ -8,4 +8,4 @@ description = "Shared tests for kvdb functionality, to be executed against actua license = "MIT OR Apache-2.0" [dependencies] -kvdb = { path = "../kvdb", version = "0.12" } +kvdb = { path = "../kvdb", version = "0.13" } diff --git a/kvdb/CHANGELOG.md b/kvdb/CHANGELOG.md index d6e7b9c05..5e4305c6f 100644 --- a/kvdb/CHANGELOG.md +++ b/kvdb/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] + +## [0.13.0] - 2022-11-29 +- Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) + ## [0.12.0] - 2022-09-20 ### Breaking - Removed `fn restore` from `KeyValueDB` trait. [#662](https://github.com/paritytech/parity-common/pull/662) diff --git a/kvdb/Cargo.toml b/kvdb/Cargo.toml index a926dffcb..c66b024b8 100644 --- a/kvdb/Cargo.toml +++ b/kvdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb" -version = "0.12.0" +version = "0.13.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "Generic key-value trait" diff --git a/parity-util-mem/CHANGELOG.md b/parity-util-mem/CHANGELOG.md deleted file mode 100644 index f7b73a4ee..000000000 --- a/parity-util-mem/CHANGELOG.md +++ /dev/null @@ -1,92 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. - -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] - -## [0.12.0] - 2022-09-20 -### Breaking -- Updated `tikv-jemallocator` to 0.5. [#661](https://github.com/paritytech/parity-common/pull/661) -- Updated `lru` to 0.8. 
[#675](https://github.com/paritytech/parity-common/pull/675) -- Removed `weealloc-global` feature. [#678](https://github.com/paritytech/parity-common/pull/678) -- Updated `primitive-types` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) -- Updated `ethereum-types` to 0.14. [#680](https://github.com/paritytech/parity-common/pull/680) - -## [0.11.0] - 2022-02-04 -### Breaking -- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) -- Updated `primitive-types` to 0.11. [#623](https://github.com/paritytech/parity-common/pull/623) -- Updated `ethereum-types` to 0.13. [#623](https://github.com/paritytech/parity-common/pull/623) -- Updated `lru` to 0.7. [#595](https://github.com/paritytech/parity-common/pull/595) -- Updated `parking_lot` to 0.12. [#619](https://github.com/paritytech/parity-common/pull/619) -- Updated `hashbrown` to 0.12. [#612](https://github.com/paritytech/parity-common/pull/612) - -## [0.10.2] - 2021-09-20 -- Switched from `jemallocator` to `tikv-jemallocator`. [#589](https://github.com/paritytech/parity-common/pull/589) - -## [0.10.1] - 2021-09-15 -- Added support for memory stats gathering, ported over from `polkadot`. [#588](https://github.com/paritytech/parity-common/pull/588) - -## [0.10.0] - 2021-07-02 -- Fixed `malloc_usable_size` for FreeBSD. [#553](https://github.com/paritytech/parity-common/pull/553) - -### Breaking -- Updated `ethereum-types` to 0.12. [#556](https://github.com/paritytech/parity-common/pull/556) -- Updated `primitive-types` to 0.10. [#556](https://github.com/paritytech/parity-common/pull/556) -- Updated `hashbrown` to 0.11. [#533](https://github.com/paritytech/parity-common/pull/533) - -## [0.9.0] - 2021-01-27 -### Breaking -- Updated `ethereum-types` to 0.11. [#510](https://github.com/paritytech/parity-common/pull/510) -- Updated `primitive-types` to 0.9. 
[#510](https://github.com/paritytech/parity-common/pull/510) - -## [0.8.0] - 2021-01-05 -- Updated dlmalloc to 0.2.1. [#452](https://github.com/paritytech/parity-common/pull/452) -### Breaking -- Updated `ethereum-types` to 0.10. [#463](https://github.com/paritytech/parity-common/pull/463) -- Updated `parking_lot` to 0.11.1. [#470](https://github.com/paritytech/parity-common/pull/470) - -## [0.7.0] - 2020-06-24 -- Added `const_size` to `MallocSizeOf` to optimize it for flat collections. [#398](https://github.com/paritytech/parity-common/pull/398) -- Exported `MallocShallowSizeOf`. [#399](https://github.com/paritytech/parity-common/pull/399) -- Updated dependencies. - -## [0.6.1] - 2020-04-15 -- Fix compilation on Windows for no-std. [#375](https://github.com/paritytech/parity-common/pull/375) -- Prevent multiple versions from being linked into the same program. [#363](https://github.com/paritytech/parity-common/pull/363) - -## [0.6.0] - 2020-03-13 -- Updated dependencies. [#361](https://github.com/paritytech/parity-common/pull/361) - -## [0.5.2] - 2020-03-13 -- License changed from GPL3 to dual MIT/Apache2. [#342](https://github.com/paritytech/parity-common/pull/342) -- Updated mimalloc dependency. [#352](https://github.com/paritytech/parity-common/pull/352) -- Use malloc for `usable_size` on Android. [#355](https://github.com/paritytech/parity-common/pull/355) - -## [0.5.1] - 2019-02-05 -- Add different mode for malloc_size_of_is_0 macro dealing with generics. [#334](https://github.com/paritytech/parity-common/pull/334) - -## [0.5.0] - 2019-02-05 -- Bump parking_lot to 0.10. [#332](https://github.com/paritytech/parity-common/pull/332) - -## [0.4.2] - 2020-02-04 -- Implementation of `MallocSizeOf` for `BTreeSet`. [#325](https://github.com/paritytech/parity-common/pull/325) -- Split off implementation of `MallocSizeOf` for `primitive-types`. 
[#323](https://github.com/paritytech/parity-common/pull/323) - -## [0.4.1] - 2020-01-06 -- Implementation of `MallocSizeOf` for SmallVec no longer requires ethereum `ethereum-impls` feature. [#307](https://github.com/paritytech/parity-common/pull/307) - -## [0.4.0] - 2020-01-01 -- Added implementation of `MallocSizeOf` for non-std `hashbrown::HashMap` and `lru::LRUMap`. [#293](https://github.com/paritytech/parity-common/pull/293) -- Introduced our own version of `#[derive(MallocSizeOf)]` [#291](https://github.com/paritytech/parity-common/pull/291) -- Added implementation of `MallocSizeOf` for `parking_lot` locking primitives. [#290](https://github.com/paritytech/parity-common/pull/290) -- Added default implementation of `MallocSizeOf` for tuples up to 12. [#300](https://github.com/paritytech/parity-common/pull/300) - -## [0.3.0] - 2019-12-19 -- Remove `MallocSizeOf` impls for `ElasticArray` and implement it for `SmallVec` (32 and 36). [#282](https://github.com/paritytech/parity-common/pull/282) - -## [0.2.1] - 2019-10-24 -### Dependencies -- Updated dependencies. [#239](https://github.com/paritytech/parity-common/pull/239) diff --git a/parity-util-mem/Cargo.toml b/parity-util-mem/Cargo.toml deleted file mode 100644 index 4e9e8ccfa..000000000 --- a/parity-util-mem/Cargo.toml +++ /dev/null @@ -1,56 +0,0 @@ -[package] -name = "parity-util-mem" -version = "0.12.0" -authors = ["Parity Technologies "] -repository = "https://github.com/paritytech/parity-common" -description = "Collection of memory related utilities" -license = "MIT OR Apache-2.0" -edition = "2021" -rust-version = "1.56.1" - -# Prevent multiple versions from being linked into the same program. 
-links = "parity-util-mem-ban-duplicates" -# `links` requires a build script to be present: -# https://doc.rust-lang.org/cargo/reference/build-scripts.html#the-links-manifest-key -# so we use an empty build script -build = "build.rs" - -[dependencies] -cfg-if = "1.0.0" -dlmalloc = { version = "0.2.1", features = ["global"], optional = true } -lru = { version = "0.8", optional = true } -hashbrown = { version = "0.12", optional = true } -mimalloc = { version = "0.1.18", optional = true } -libmimalloc-sys = { version = "0.1.14", optional = true } -parity-util-mem-derive = { path = "derive", version = "0.1" } -impl-trait-for-tuples = "0.2.0" - -smallvec = { version = "1.0.0", optional = true } -ethereum-types = { version = "0.14.0", optional = true, path = "../ethereum-types" } -parking_lot = { version = "0.12.0", optional = true } -primitive-types = { version = "0.12", path = "../primitive-types", default-features = false, optional = true } - -[target.'cfg(target_os = "windows")'.dependencies] -winapi = { version = "0.3.8", features = ["heapapi"] } - -[target.'cfg(not(target_os = "windows"))'.dependencies.tikv-jemallocator] -version = "0.5.0" -optional = true - -[target.'cfg(not(target_os = "windows"))'.dependencies.tikv-jemalloc-ctl] -version = "0.5.0" -optional = true - -[features] -default = ["std", "ethereum-impls", "lru", "hashbrown", "smallvec", "primitive-types"] -std = ["parking_lot"] -# use dlmalloc as global allocator -dlmalloc-global = ["dlmalloc", "estimate-heapsize"] -# use jemalloc as global allocator -jemalloc-global = ["tikv-jemallocator", "tikv-jemalloc-ctl"] -# use mimalloc as global allocator -mimalloc-global = ["mimalloc", "libmimalloc-sys"] -# implement additional types -ethereum-impls = ["ethereum-types", "primitive-types"] -# Full estimate: no call to allocator -estimate-heapsize = [] diff --git a/parity-util-mem/README.md b/parity-util-mem/README.md deleted file mode 100644 index 8b7dd7bd1..000000000 --- a/parity-util-mem/README.md +++ 
/dev/null @@ -1,31 +0,0 @@ -# parity-util-mem - -Collection of memory related utilities. - -## WARNING - -When `parity-util-mem` is used as a dependency with any of the global allocator features enabled, -it must be the sole place where a global allocator is defined. -The only exception to this rule is when used in a `no_std` context or when the `estimate-heapsize` feature is used. - -Because of that, it must be present in the dependency tree with a single version. -Starting from version 0.6.1, having duplicate versions of `parity-util-mem` will lead -to a compile-time error. It still will be possible to have 0.5 and 0.6.1 versions in the same binary though. - -Unless heeded you risk UB; see discussion in [issue 364]. - -[issue 364]: https://github.com/paritytech/parity-common/issues/364 - -## Features - -- estimate-heapsize : Do not use allocator, but `size_of` or `size_of_val`. - -Others features define global allocator, see `src/alloc.rs`. - -## Dependency - -This crate groups common dependency, a patched copy of unpublished [`malloc_size_of`](https://github.com/servo/servo/tree/master/components/malloc_size_of) from servo project is copied and partially reexported. - -`Malloc_size_of` code is used internally as a module with a few modification to be able to implement type locally. - -For existing code using deprecated `HeapsizeOf` crate, calls to `heapsize_of_children` should be replace by calls to `size_of`. diff --git a/parity-util-mem/build.rs b/parity-util-mem/build.rs deleted file mode 100644 index f328e4d9d..000000000 --- a/parity-util-mem/build.rs +++ /dev/null @@ -1 +0,0 @@ -fn main() {} diff --git a/parity-util-mem/derive/CHANGELOG.md b/parity-util-mem/derive/CHANGELOG.md deleted file mode 100644 index c9a41d07a..000000000 --- a/parity-util-mem/derive/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ -# Changelog - -The format is based on [Keep a Changelog]. 
- -[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ - -## [Unreleased] -- Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/parity-util-mem/derive/Cargo.toml b/parity-util-mem/derive/Cargo.toml deleted file mode 100644 index 02d6a9d71..000000000 --- a/parity-util-mem/derive/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "parity-util-mem-derive" -version = "0.1.0" -authors = ["Parity Technologies "] -license = "MIT OR Apache-2.0" -description = "Crate for memory reporting" -repository = "https://github.com/paritytech/pariry-common/parity-util-mem/derive" -edition = "2021" -rust-version = "1.56.1" - -[lib] -path = "lib.rs" -proc-macro = true - -[dependencies] -proc-macro2 = "1" -syn = { version = "1", features = ["full"] } -synstructure = "0.12" diff --git a/parity-util-mem/derive/lib.rs b/parity-util-mem/derive/lib.rs deleted file mode 100644 index 78e718635..000000000 --- a/parity-util-mem/derive/lib.rs +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A crate for deriving the MallocSizeOf trait. -//! -//! This is a copy of Servo malloc_size_of_derive code, modified to work with -//! our `parity_util_mem` library - -extern crate proc_macro2; -#[macro_use] -extern crate syn; -#[macro_use] -extern crate synstructure; - -decl_derive!([MallocSizeOf, attributes(ignore_malloc_size_of)] => malloc_size_of_derive); - -fn malloc_size_of_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { - let match_body = s.each(|binding| { - let ignore = binding.ast().attrs.iter().any(|attr| match attr.parse_meta().unwrap() { - syn::Meta::Path(ref path) | syn::Meta::List(syn::MetaList { ref path, .. 
}) - if path.is_ident("ignore_malloc_size_of") => - { - panic!( - "#[ignore_malloc_size_of] should have an explanation, \ - e.g. #[ignore_malloc_size_of = \"because reasons\"]" - ); - }, - syn::Meta::NameValue(syn::MetaNameValue { ref path, .. }) if path.is_ident("ignore_malloc_size_of") => true, - _ => false, - }); - if ignore { - None - } else if let syn::Type::Array(..) = binding.ast().ty { - Some(quote! { - for item in #binding.iter() { - sum += parity_util_mem::MallocSizeOf::size_of(item, ops); - } - }) - } else { - Some(quote! { - sum += parity_util_mem::MallocSizeOf::size_of(#binding, ops); - }) - } - }); - - let ast = s.ast(); - let name = &ast.ident; - let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); - let mut where_clause = where_clause.unwrap_or(&parse_quote!(where)).clone(); - for param in ast.generics.type_params() { - let ident = ¶m.ident; - where_clause - .predicates - .push(parse_quote!(#ident: parity_util_mem::MallocSizeOf)); - } - - let tokens = quote! { - impl #impl_generics parity_util_mem::MallocSizeOf for #name #ty_generics #where_clause { - #[inline] - #[allow(unused_variables, unused_mut, unreachable_code)] - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - let mut sum = 0; - match *self { - #match_body - } - sum - } - } - }; - - tokens -} diff --git a/parity-util-mem/src/allocators.rs b/parity-util-mem/src/allocators.rs deleted file mode 100644 index 576e93314..000000000 --- a/parity-util-mem/src/allocators.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! default allocator management -//! Features are: -//! - windows: -//! - no features: default implementation from servo `heapsize` crate -//! - dlmalloc: default to `estimate_size` -//! 
- jemalloc: default windows allocator is used instead -//! - mimalloc: use mimallocator crate -//! - arch x86: -//! - no features: use default alloc -//! - jemalloc: use tikv-jemallocator crate -//! - dlmalloc: default to `estimate_size` -//! - mimalloc: use mimallocator crate -//! - arch x86/macos: -//! - no features: use default alloc, requires using `estimate_size` -//! - jemalloc: use tikv-jemallocator crate -//! - dlmalloc: default to `estimate_size` -//! - mimalloc: use mimallocator crate -//! - arch wasm32: -//! - no features: default to `estimate_size` -//! - dlmalloc: default to `estimate_size` -//! - jemalloc: compile error -//! - mimalloc: compile error (until https://github.com/microsoft/mimalloc/pull/32 is merged) - -#[cfg(feature = "std")] -use crate::malloc_size::MallocUnconditionalSizeOf; -use crate::malloc_size::{MallocSizeOf, MallocSizeOfOps, VoidPtrToSizeFn}; -#[cfg(not(feature = "std"))] -use core::ffi::c_void; -#[cfg(feature = "std")] -use std::os::raw::c_void; - -mod usable_size { - - use super::*; - - cfg_if::cfg_if! { - - if #[cfg(any( - target_arch = "wasm32", - feature = "estimate-heapsize", - feature = "dlmalloc-global", - ))] { - - // do not try system allocator - - /// Warning this is for compatibility only. - /// This function does panic: `estimate-heapsize` feature needs to be activated - /// to avoid this function call. - pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { - unreachable!("estimate heapsize only") - } - - } else if #[cfg(target_os = "windows")] { - - use winapi::um::heapapi::{GetProcessHeap, HeapSize, HeapValidate}; - use winapi::ctypes::c_void as winapi_c_void; - - /// Get the size of a heap block. 
- /// Call windows allocator through `winapi` crate - pub unsafe extern "C" fn malloc_usable_size(mut ptr: *const c_void) -> usize { - - let heap = GetProcessHeap(); - - if HeapValidate(heap, 0, ptr as *const winapi_c_void) == 0 { - ptr = *(ptr as *const *const c_void).offset(-1); - } - - HeapSize(heap, 0, ptr as *const winapi_c_void) as usize - } - - } else if #[cfg(feature = "jemalloc-global")] { - - /// Use of jemalloc usable size C function through jemallocator crate call. - pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - tikv_jemallocator::usable_size(ptr) - } - - } else if #[cfg(feature = "mimalloc-global")] { - - /// Use of mimalloc usable size C function through mimalloc_sys crate call. - pub unsafe extern "C" fn malloc_usable_size(ptr: *const c_void) -> usize { - // mimalloc doesn't actually mutate the value ptr points to, - // but requires a mut pointer in the API - libmimalloc_sys::mi_usable_size(ptr as *mut _) - } - - } else if #[cfg(any( - target_os = "linux", - target_os = "android", - target_os = "freebsd", - ))] { - // Linux/BSD call system allocator (currently malloc). - extern "C" { - pub fn malloc_usable_size(ptr: *const c_void) -> usize; - } - - } else { - // default allocator for non linux or windows system use estimate - pub unsafe extern "C" fn malloc_usable_size(_ptr: *const c_void) -> usize { - unreachable!("estimate heapsize or feature allocator needed") - } - - } - - } - - /// No enclosing function defined. - #[inline] - pub fn new_enclosing_size_fn() -> Option { - None - } -} - -/// Get a new instance of a MallocSizeOfOps -pub fn new_malloc_size_ops() -> MallocSizeOfOps { - MallocSizeOfOps::new(usable_size::malloc_usable_size, usable_size::new_enclosing_size_fn(), None) -} - -/// Extension methods for `MallocSizeOf` trait, do not implement -/// directly. -/// It allows getting heapsize without exposing `MallocSizeOfOps` -/// (a single default `MallocSizeOfOps` is used for each call). 
-pub trait MallocSizeOfExt: MallocSizeOf { - /// Method to launch a heapsize measurement with a - /// fresh state. - fn malloc_size_of(&self) -> usize { - let mut ops = new_malloc_size_ops(); - ::size_of(self, &mut ops) - } -} - -impl MallocSizeOfExt for T {} - -#[cfg(feature = "std")] -impl MallocSizeOf for std::sync::Arc { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_size_of(ops) - } -} diff --git a/parity-util-mem/src/ethereum_impls.rs b/parity-util-mem/src/ethereum_impls.rs deleted file mode 100644 index c296d2d40..000000000 --- a/parity-util-mem/src/ethereum_impls.rs +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Implementation of `MallocSize` for common ethereum types: fixed hashes -//! and uints. - -use ethereum_types::{Bloom, H128, H264, H32, H520, H64, U64}; - -malloc_size_of_is_0!(U64, H32, H64, H128, H264, H520, Bloom); diff --git a/parity-util-mem/src/lib.rs b/parity-util-mem/src/lib.rs deleted file mode 100644 index 4531c63f9..000000000 --- a/parity-util-mem/src/lib.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Crate for parity memory management related utilities. -//! It includes global allocator choice, heap measurement and -//! memory erasure. - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(not(feature = "std"))] -extern crate alloc; - -cfg_if::cfg_if! 
{ - if #[cfg(all( - feature = "jemalloc-global", - not(target_os = "windows"), - not(target_arch = "wasm32") - ))] { - /// Global allocator - #[global_allocator] - pub static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; - - mod memory_stats_jemalloc; - use memory_stats_jemalloc as memory_stats; - } else if #[cfg(feature = "dlmalloc-global")] { - /// Global allocator - #[global_allocator] - pub static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; - - mod memory_stats_noop; - use memory_stats_noop as memory_stats; - } else if #[cfg(all( - feature = "mimalloc-global", - not(target_arch = "wasm32") - ))] { - /// Global allocator - #[global_allocator] - pub static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc; - - mod memory_stats_noop; - use memory_stats_noop as memory_stats; - } else { - // default allocator used - mod memory_stats_noop; - use memory_stats_noop as memory_stats; - } -} - -pub mod allocators; - -#[cfg(any( - all(any(target_os = "macos", target_os = "ios"), not(feature = "jemalloc-global"),), - feature = "estimate-heapsize" -))] -pub mod sizeof; - -/// This is a copy of patched crate `malloc_size_of` as a module. -/// We need to have it as an inner module to be able to define our own traits implementation, -/// if at some point the trait become standard enough we could use the right way of doing it -/// by implementing it in our type traits crates. At this time moving this trait to the primitive -/// types level would impact too much of the dependencies to be easily manageable. -#[macro_use] -mod malloc_size; - -#[cfg(feature = "ethereum-impls")] -pub mod ethereum_impls; - -#[cfg(feature = "primitive-types")] -pub mod primitives_impls; - -pub use allocators::MallocSizeOfExt; -pub use malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps}; - -pub use parity_util_mem_derive::*; - -/// Heap size of structure. -/// -/// Structure can be anything that implements MallocSizeOf. 
-pub fn malloc_size(t: &T) -> usize { - MallocSizeOf::size_of(t, &mut allocators::new_malloc_size_ops()) -} - -/// An error related to the memory stats gathering. -#[derive(Clone, Debug)] -pub struct MemoryStatsError(memory_stats::Error); - -#[cfg(feature = "std")] -impl std::fmt::Display for MemoryStatsError { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - self.0.fmt(fmt) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for MemoryStatsError {} - -/// Snapshot of collected memory metrics. -#[non_exhaustive] -#[derive(Debug, Clone)] -pub struct MemoryAllocationSnapshot { - /// Total resident memory, in bytes. - pub resident: u64, - /// Total allocated memory, in bytes. - pub allocated: u64, -} - -/// Accessor to the allocator internals. -#[derive(Clone)] -pub struct MemoryAllocationTracker(self::memory_stats::MemoryAllocationTracker); - -impl MemoryAllocationTracker { - /// Create an instance of an allocation tracker. - pub fn new() -> Result { - self::memory_stats::MemoryAllocationTracker::new() - .map(MemoryAllocationTracker) - .map_err(MemoryStatsError) - } - - /// Create an allocation snapshot. - pub fn snapshot(&self) -> Result { - self.0.snapshot().map_err(MemoryStatsError) - } -} - -#[cfg(feature = "std")] -#[cfg(test)] -mod test { - use super::{malloc_size, MallocSizeOf, MallocSizeOfExt}; - use std::sync::Arc; - - #[test] - fn test_arc() { - let val = Arc::new("test".to_string()); - let s = val.malloc_size_of(); - assert!(s > 0); - } - - #[test] - fn test_dyn() { - trait Augmented: MallocSizeOf {} - impl Augmented for Vec {} - let val: Arc = Arc::new(vec![0u8; 1024]); - assert!(malloc_size(&*val) > 1000); - } -} diff --git a/parity-util-mem/src/malloc_size.rs b/parity-util-mem/src/malloc_size.rs deleted file mode 100644 index 907726674..000000000 --- a/parity-util-mem/src/malloc_size.rs +++ /dev/null @@ -1,911 +0,0 @@ -// Copyright 2016-2017 The Servo Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A crate for measuring the heap usage of data structures in a way that -//! integrates with Firefox's memory reporting, particularly the use of -//! mozjemalloc and DMD. In particular, it has the following features. -//! - It isn't bound to a particular heap allocator. -//! - It provides traits for both "shallow" and "deep" measurement, which gives -//! flexibility in the cases where the traits can't be used. -//! - It allows for measuring blocks even when only an interior pointer can be -//! obtained for heap allocations, e.g. `HashSet` and `HashMap`. (This relies -//! on the heap allocator having suitable support, which mozjemalloc has.) -//! - It allows handling of types like `Rc` and `Arc` by providing traits that -//! are different to the ones for non-graph structures. -//! -//! Suggested uses are as follows. -//! - When possible, use the `MallocSizeOf` trait. (Deriving support is -//! provided by the `malloc_size_of_derive` crate.) -//! - If you need an additional synchronization argument, provide a function -//! that is like the standard trait method, but with the extra argument. -//! - If you need multiple measurements for a type, provide a function named -//! `add_size_of` that takes a mutable reference to a struct that contains -//! the multiple measurement fields. -//! - When deep measurement (via `MallocSizeOf`) cannot be implemented for a -//! type, shallow measurement (via `MallocShallowSizeOf`) in combination with -//! iteration can be a useful substitute. -//! - `Rc` and `Arc` are always tricky, which is why `MallocSizeOf` is not (and -//! should not be) implemented for them. -//! 
- If an `Rc` or `Arc` is known to be a "primary" reference and can always -//! be measured, it should be measured via the `MallocUnconditionalSizeOf` -//! trait. -//! - If an `Rc` or `Arc` should be measured only if it hasn't been seen -//! before, it should be measured via the `MallocConditionalSizeOf` trait. -//! - Using universal function call syntax is a good idea when measuring boxed -//! fields in structs, because it makes it clear that the Box is being -//! measured as well as the thing it points to. E.g. -//! ` as MallocSizeOf>::size_of(field, ops)`. - -//! This is an extended version of the Servo internal malloc_size crate. -//! We should occasionally track the upstream changes/fixes and reintroduce them here, whenever applicable. - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -#[cfg(feature = "std")] -mod rstd { - pub use std::*; -} -#[cfg(not(feature = "std"))] -mod rstd { - pub use core::*; - pub mod collections { - pub use alloc::collections::*; - pub use vec_deque::VecDeque; - } -} - -#[cfg(feature = "std")] -use std::sync::Arc; - -#[cfg(not(feature = "std"))] -pub use alloc::boxed::Box; -#[cfg(not(feature = "std"))] -use core::ffi::c_void; -#[cfg(feature = "std")] -use rstd::hash::Hash; -use rstd::{ - marker::PhantomData, - mem::size_of, - ops::{Deref, DerefMut, Range}, -}; -#[cfg(feature = "std")] -use std::hash::BuildHasher; -#[cfg(feature = "std")] -use std::os::raw::c_void; - -/// A C function that takes a pointer to a heap allocation and returns its size. -pub type VoidPtrToSizeFn = unsafe extern "C" fn(ptr: *const c_void) -> usize; - -/// A closure implementing a stateful predicate on pointers. -pub type VoidPtrToBoolFnMut = dyn FnMut(*const c_void) -> bool; - -/// Operations used when measuring heap usage of data structures. -pub struct MallocSizeOfOps { - /// A function that returns the size of a heap allocation. - size_of_op: VoidPtrToSizeFn, - - /// Like `size_of_op`, but can take an interior pointer. 
Optional because - /// not all allocators support this operation. If it's not provided, some - /// memory measurements will actually be computed estimates rather than - /// real and accurate measurements. - enclosing_size_of_op: Option, - - /// Check if a pointer has been seen before, and remember it for next time. - /// Useful when measuring `Rc`s and `Arc`s. Optional, because many places - /// don't need it. - have_seen_ptr_op: Option>, -} - -impl MallocSizeOfOps { - pub fn new( - size_of: VoidPtrToSizeFn, - malloc_enclosing_size_of: Option, - have_seen_ptr: Option>, - ) -> Self { - MallocSizeOfOps { - size_of_op: size_of, - enclosing_size_of_op: malloc_enclosing_size_of, - have_seen_ptr_op: have_seen_ptr, - } - } - - /// Check if an allocation is empty. This relies on knowledge of how Rust - /// handles empty allocations, which may change in the future. - fn is_empty(ptr: *const T) -> bool { - // The correct condition is this: - // `ptr as usize <= ::std::mem::align_of::()` - // But we can't call align_of() on a ?Sized T. So we approximate it - // with the following. 256 is large enough that it should always be - // larger than the required alignment, but small enough that it is - // always in the first page of memory and therefore not a legitimate - // address. - return ptr as *const usize as usize <= 256 - } - - /// Call `size_of_op` on `ptr`, first checking that the allocation isn't - /// empty, because some types (such as `Vec`) utilize empty allocations. - pub unsafe fn malloc_size_of(&self, ptr: *const T) -> usize { - if MallocSizeOfOps::is_empty(ptr) { - 0 - } else { - (self.size_of_op)(ptr as *const c_void) - } - } - - /// Is an `enclosing_size_of_op` available? - pub fn has_malloc_enclosing_size_of(&self) -> bool { - self.enclosing_size_of_op.is_some() - } - - /// Call `enclosing_size_of_op`, which must be available, on `ptr`, which - /// must not be empty. 
- pub unsafe fn malloc_enclosing_size_of(&self, ptr: *const T) -> usize { - assert!(!MallocSizeOfOps::is_empty(ptr)); - (self.enclosing_size_of_op.unwrap())(ptr as *const c_void) - } - - /// Call `have_seen_ptr_op` on `ptr`. - pub fn have_seen_ptr(&mut self, ptr: *const T) -> bool { - let have_seen_ptr_op = self.have_seen_ptr_op.as_mut().expect("missing have_seen_ptr_op"); - have_seen_ptr_op(ptr as *const c_void) - } -} - -/// Trait for measuring the "deep" heap usage of a data structure. This is the -/// most commonly-used of the traits. -pub trait MallocSizeOf { - /// Measure the heap usage of all descendant heap-allocated structures, but - /// not the space taken up by the value itself. - /// If `T::size_of` is a constant, consider implementing `constant_size` as well. - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize; - - /// Used to optimize `MallocSizeOf` implementation for collections - /// like `Vec` and `HashMap` to avoid iterating over them unnecessarily. - /// The `Self: Sized` bound is for object safety. - fn constant_size() -> Option - where - Self: Sized, - { - None - } -} - -/// Trait for measuring the "shallow" heap usage of a container. -pub trait MallocShallowSizeOf { - /// Measure the heap usage of immediate heap-allocated descendant - /// structures, but not the space taken up by the value itself. Anything - /// beyond the immediate descendants must be measured separately, using - /// iteration. - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// Like `MallocSizeOf`, but with a different name so it cannot be used -/// accidentally with derive(MallocSizeOf). For use with types like `Rc` and -/// `Arc` when appropriate (e.g. when measuring a "primary" reference). -pub trait MallocUnconditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself. 
- fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// `MallocUnconditionalSizeOf` combined with `MallocShallowSizeOf`. -pub trait MallocUnconditionalShallowSizeOf { - /// `unconditional_size_of` combined with `shallow_size_of`. - fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// Like `MallocSizeOf`, but only measures if the value hasn't already been -/// measured. For use with types like `Rc` and `Arc` when appropriate (e.g. -/// when there is no "primary" reference). -pub trait MallocConditionalSizeOf { - /// Measure the heap usage of all heap-allocated descendant structures, but - /// not the space taken up by the value itself, and only if that heap usage - /// hasn't already been measured. - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -/// `MallocConditionalSizeOf` combined with `MallocShallowSizeOf`. -pub trait MallocConditionalShallowSizeOf { - /// `conditional_size_of` combined with `shallow_size_of`. 
- fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize; -} - -#[cfg(not(any( - all(any(target_os = "macos", target_os = "ios"), not(feature = "jemalloc-global"),), - feature = "estimate-heapsize" -)))] -pub mod inner_allocator_use { - - use super::*; - - #[cfg(not(feature = "std"))] - use alloc::string::String; - - impl MallocShallowSizeOf for Box { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(&**self) } - } - } - - impl MallocShallowSizeOf for Vec { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(self.as_ptr()) } - } - } - - // currently this seems only fine with jemalloc - #[cfg(feature = "std")] - #[cfg(all(feature = "jemalloc-global", not(target_os = "windows")))] - impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(arc_ptr(self)) } - } - } - - #[cfg(feature = "std")] - #[cfg(not(all(feature = "jemalloc-global", not(target_os = "windows"))))] - impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - size_of::() - } - } - - impl MallocSizeOf for String { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - unsafe { ops.malloc_size_of(self.as_ptr()) } - } - } -} - -impl<'a, T: ?Sized> MallocSizeOf for &'a T { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - // Zero makes sense for a non-owning reference. 
- 0 - } - fn constant_size() -> Option { - Some(0) - } -} - -impl MallocSizeOf for Box { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.shallow_size_of(ops) + (**self).size_of(ops) - } -} - -#[impl_trait_for_tuples::impl_for_tuples(12)] -impl MallocSizeOf for Tuple { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut result = 0; - for_tuples!( #( result += Tuple.size_of(ops); )* ); - result - } - fn constant_size() -> Option { - let mut result = Some(0); - for_tuples!( #( result = result.and_then(|s| Tuple::constant_size().map(|t| s + t)); )* ); - result - } -} - -impl MallocSizeOf for Option { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if let Some(val) = self.as_ref() { - val.size_of(ops) - } else { - 0 - } - } - fn constant_size() -> Option { - T::constant_size().filter(|s| *s == 0) - } -} - -impl MallocSizeOf for Result { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - match *self { - Ok(ref x) => x.size_of(ops), - Err(ref e) => e.size_of(ops), - } - } - fn constant_size() -> Option { - // Result has constant size iff T::constant_size == E::constant_size - T::constant_size().and_then(|t| E::constant_size().filter(|e| *e == t)) - } -} - -impl MallocSizeOf for rstd::cell::Cell { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.get().size_of(ops) - } - fn constant_size() -> Option { - T::constant_size() - } -} - -impl MallocSizeOf for rstd::cell::RefCell { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.borrow().size_of(ops) - } - fn constant_size() -> Option { - T::constant_size() - } -} - -#[cfg(feature = "std")] -impl<'a, B: ?Sized + ToOwned> MallocSizeOf for std::borrow::Cow<'a, B> -where - B::Owned: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - match *self { - std::borrow::Cow::Borrowed(_) => 0, - std::borrow::Cow::Owned(ref b) => b.size_of(ops), - } - } -} - -impl MallocSizeOf for [T] { - fn size_of(&self, ops: &mut MallocSizeOfOps) 
-> usize { - let mut n = 0; - if let Some(t) = T::constant_size() { - n += self.len() * t; - } else { - n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) - } - n - } -} - -impl MallocSizeOf for Vec { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - if let Some(t) = T::constant_size() { - n += self.len() * t; - } else { - n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) - } - n - } -} - -impl MallocShallowSizeOf for rstd::collections::VecDeque { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - if let Some(front) = self.front() { - // The front element is an interior pointer. - unsafe { ops.malloc_enclosing_size_of(&*front) } - } else { - // This assumes that no memory is allocated when the VecDeque is empty. - 0 - } - } else { - // An estimate. - self.capacity() * size_of::() - } - } -} - -impl MallocSizeOf for rstd::collections::VecDeque { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - if let Some(t) = T::constant_size() { - n += self.len() * t; - } else { - n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) - } - n - } -} - -#[cfg(feature = "std")] -impl MallocShallowSizeOf for std::collections::HashSet -where - T: Eq + Hash, - S: BuildHasher, -{ - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - // The first value from the iterator gives us an interior pointer. - // `ops.malloc_enclosing_size_of()` then gives us the storage size. - // This assumes that the `HashSet`'s contents (values and hashes) - // are all stored in a single contiguous heap allocation. - self.iter().next().map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) - } else { - // An estimate. 
- self.capacity() * (size_of::() + size_of::()) - } - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for std::collections::HashSet -where - T: Eq + Hash + MallocSizeOf, - S: BuildHasher, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - if let Some(t) = T::constant_size() { - n += self.len() * t; - } else { - n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) - } - n - } -} - -impl MallocSizeOf for rstd::cmp::Reverse { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) - } - fn constant_size() -> Option { - I::constant_size() - } -} - -#[cfg(feature = "std")] -impl MallocShallowSizeOf for std::collections::HashMap { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - // See the implementation for std::collections::HashSet for details. - if ops.has_malloc_enclosing_size_of() { - self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.capacity() * (size_of::() + size_of::() + size_of::()) - } - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for std::collections::HashMap -where - K: MallocSizeOf, - V: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { - n += self.len() * (k + v) - } else { - n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) - } - n - } -} - -impl MallocShallowSizeOf for rstd::collections::BTreeMap { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.len() * (size_of::() + size_of::() + size_of::()) - } - } -} - -impl MallocSizeOf for rstd::collections::BTreeMap -where - K: MallocSizeOf, - V: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = 
self.shallow_size_of(ops); - if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { - n += self.len() * (k + v) - } else { - n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) - } - n - } -} - -impl MallocShallowSizeOf for rstd::collections::BTreeSet { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.has_malloc_enclosing_size_of() { - // See implementation for HashSet how this works. - self.iter().next().map_or(0, |t| unsafe { ops.malloc_enclosing_size_of(t) }) - } else { - // An estimate. - self.len() * (size_of::() + size_of::()) - } - } -} - -impl MallocSizeOf for rstd::collections::BTreeSet -where - T: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - if let Some(t) = T::constant_size() { - n += self.len() * t; - } else { - n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) - } - n - } -} - -// XXX: we don't want MallocSizeOf to be defined for Rc and Arc. If negative -// trait bounds are ever allowed, this code should be uncommented. 
-// (We do have a compile-fail test for this: -// rc_arc_must_not_derive_malloc_size_of.rs) -// impl !MallocSizeOf for Arc { } -// impl !MallocShallowSizeOf for Arc { } - -#[cfg(feature = "std")] -fn arc_ptr(s: &Arc) -> *const T { - &(**s) as *const T -} - -#[cfg(feature = "std")] -impl MallocUnconditionalSizeOf for Arc { - fn unconditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.unconditional_shallow_size_of(ops) + (**self).size_of(ops) - } -} - -#[cfg(feature = "std")] -impl MallocConditionalShallowSizeOf for Arc { - fn conditional_shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_shallow_size_of(ops) - } - } -} - -#[cfg(feature = "std")] -impl MallocConditionalSizeOf for Arc { - fn conditional_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - if ops.have_seen_ptr(arc_ptr(self)) { - 0 - } else { - self.unconditional_size_of(ops) - } - } -} - -/// If a mutex is stored directly as a member of a data type that is being measured, -/// it is the unique owner of its contents and deserves to be measured. -/// -/// If a mutex is stored inside of an Arc value as a member of a data type that is being measured, -/// the Arc will not be automatically measured so there is no risk of overcounting the mutex's -/// contents. -/// -/// The same reasoning applies to RwLock. 
-#[cfg(feature = "std")] -impl MallocSizeOf for std::sync::Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.lock().unwrap().size_of(ops) - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for parking_lot::Mutex { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.lock().size_of(ops) - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for std::sync::RwLock { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.read().unwrap().size_of(ops) - } -} - -#[cfg(feature = "std")] -impl MallocSizeOf for parking_lot::RwLock { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.read().size_of(ops) - } -} - -/// Implement notion of 0 allocation size for some type(s). -/// -/// if used for generics, by default it will require that generaic arguments -/// should implement `MallocSizeOf`. This can be avoided with passing "any: " -/// in front of type list. -/// -/// ```rust -/// use parity_util_mem::{malloc_size, malloc_size_of_is_0}; -/// -/// struct Data

{ -/// phantom: std::marker::PhantomData

, -/// } -/// -/// malloc_size_of_is_0!(any: Data

); -/// -/// // MallocSizeOf is NOT implemented for [u8; 333] -/// assert_eq!(malloc_size(&Data::<[u8; 333]> { phantom: std::marker::PhantomData }), 0); -/// ``` -/// -/// and when no "any: " -/// -/// ```rust -/// use parity_util_mem::{malloc_size, malloc_size_of_is_0}; -/// -/// struct Data(pub T); -/// -/// // generic argument (`T`) must be `impl MallocSizeOf` -/// malloc_size_of_is_0!(Data); -/// -/// assert_eq!(malloc_size(&Data(0u8)), 0); -/// ``` -#[macro_export] -macro_rules! malloc_size_of_is_0( - ($($ty:ty),+) => ( - $( - impl $crate::MallocSizeOf for $ty { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - #[inline(always)] - fn constant_size() -> Option { Some(0) } - } - )+ - ); - (any: $($ty:ident<$($gen:ident),+>),+) => ( - $( - impl<$($gen),+> $crate::MallocSizeOf for $ty<$($gen),+> { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - #[inline(always)] - fn constant_size() -> Option { Some(0) } - } - )+ - ); - ($($ty:ident<$($gen:ident),+>),+) => ( - $( - impl<$($gen: $crate::MallocSizeOf),+> $crate::MallocSizeOf for $ty<$($gen),+> { - #[inline(always)] - fn size_of(&self, _: &mut $crate::MallocSizeOfOps) -> usize { - 0 - } - #[inline(always)] - fn constant_size() -> Option { Some(0) } - } - )+ - ); -); - -malloc_size_of_is_0!(bool, char, str); -malloc_size_of_is_0!(u8, u16, u32, u64, u128, usize); -malloc_size_of_is_0!(i8, i16, i32, i64, i128, isize); -malloc_size_of_is_0!(f32, f64); - -malloc_size_of_is_0!(rstd::sync::atomic::AtomicBool); -malloc_size_of_is_0!(rstd::sync::atomic::AtomicIsize); -malloc_size_of_is_0!(rstd::sync::atomic::AtomicUsize); - -malloc_size_of_is_0!(Range, Range, Range, Range, Range); -malloc_size_of_is_0!(Range, Range, Range, Range, Range); -malloc_size_of_is_0!(Range, Range); -malloc_size_of_is_0!(any: PhantomData); - -/// Measurable that defers to inner value and used to verify MallocSizeOf implementation in a -/// struct. 
-#[derive(Clone)] -pub struct Measurable(pub T); - -impl Deref for Measurable { - type Target = T; - - fn deref(&self) -> &T { - &self.0 - } -} - -impl DerefMut for Measurable { - fn deref_mut(&mut self) -> &mut T { - &mut self.0 - } -} - -#[cfg(feature = "hashbrown")] -impl MallocShallowSizeOf for hashbrown::HashMap { - fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - // See the implementation for std::collections::HashSet for details. - if ops.has_malloc_enclosing_size_of() { - self.values().next().map_or(0, |v| unsafe { ops.malloc_enclosing_size_of(v) }) - } else { - self.capacity() * (size_of::() + size_of::() + size_of::()) - } - } -} - -#[cfg(feature = "hashbrown")] -impl MallocSizeOf for hashbrown::HashMap -where - K: MallocSizeOf, - V: MallocSizeOf, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = self.shallow_size_of(ops); - if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { - n += self.len() * (k + v) - } else { - n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) - } - n - } -} - -#[cfg(feature = "lru")] -impl MallocSizeOf for lru::LruCache -where - K: MallocSizeOf + rstd::cmp::Eq + rstd::hash::Hash, - V: MallocSizeOf, - S: rstd::hash::BuildHasher, -{ - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = 0; - if let (Some(k), Some(v)) = (K::constant_size(), V::constant_size()) { - n += self.len() * (k + v) - } else { - n = self.iter().fold(n, |acc, (k, v)| acc + k.size_of(ops) + v.size_of(ops)) - } - n - } -} - -malloc_size_of_is_0!( - [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 9], [u8; 10], [u8; 11], [u8; 12], - [u8; 13], [u8; 14], [u8; 15], [u8; 16], [u8; 17], [u8; 18], [u8; 19], [u8; 20], [u8; 21], [u8; 22], [u8; 23], - [u8; 24], [u8; 25], [u8; 26], [u8; 27], [u8; 28], [u8; 29], [u8; 30], [u8; 31], [u8; 32] -); - -macro_rules! 
impl_smallvec { - ($size: expr) => { - #[cfg(feature = "smallvec")] - impl MallocSizeOf for smallvec::SmallVec<[T; $size]> - where - T: MallocSizeOf, - { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - let mut n = if self.spilled() { self.capacity() * core::mem::size_of::() } else { 0 }; - if let Some(t) = T::constant_size() { - n += self.len() * t; - } else { - n = self.iter().fold(n, |acc, elem| acc + elem.size_of(ops)) - } - n - } - } - }; -} - -impl_smallvec!(32); // kvdb uses this -impl_smallvec!(36); // trie-db uses this - -#[cfg(feature = "std")] -malloc_size_of_is_0!(std::time::Instant); -#[cfg(feature = "std")] -malloc_size_of_is_0!(std::time::Duration); - -#[cfg(all(test, feature = "std"))] // tests are using std implementations -mod tests { - use crate::{allocators::new_malloc_size_ops, MallocSizeOf, MallocSizeOfOps}; - use smallvec::SmallVec; - use std::{collections::BTreeSet, mem}; - impl_smallvec!(3); - - #[test] - fn test_smallvec_stack_allocated_type() { - let mut v: SmallVec<[u8; 3]> = SmallVec::new(); - let mut ops = new_malloc_size_ops(); - assert_eq!(v.size_of(&mut ops), 0); - v.push(1); - v.push(2); - v.push(3); - assert_eq!(v.size_of(&mut ops), 0); - assert!(!v.spilled()); - v.push(4); - assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); - assert_eq!(v.size_of(&mut ops), 4); // 4 u8s on the heap - } - - #[test] - fn test_smallvec_boxed_stack_allocated_type() { - let mut v: SmallVec<[Box; 3]> = SmallVec::new(); - let mut ops = new_malloc_size_ops(); - assert_eq!(v.size_of(&mut ops), 0); - v.push(Box::new(1u8)); - v.push(Box::new(2u8)); - v.push(Box::new(3u8)); - assert!(v.size_of(&mut ops) >= 3); - assert!(!v.spilled()); - v.push(Box::new(4u8)); - assert!(v.spilled(), "SmallVec spills when going beyond the capacity of the inner backing array"); - let mut ops = new_malloc_size_ops(); - let expected_min_allocs = mem::size_of::>() * 4 + 4; - assert!(v.size_of(&mut ops) >= 
expected_min_allocs); - } - - #[test] - fn test_smallvec_heap_allocated_type() { - let mut v: SmallVec<[String; 3]> = SmallVec::new(); - let mut ops = new_malloc_size_ops(); - assert_eq!(v.size_of(&mut ops), 0); - v.push("COW".into()); - v.push("PIG".into()); - v.push("DUCK".into()); - assert!(!v.spilled()); - assert!(v.size_of(&mut ops) >= "COW".len() + "PIG".len() + "DUCK".len()); - v.push("ÖWL".into()); - assert!(v.spilled()); - let mut ops = new_malloc_size_ops(); - let expected_min_allocs = mem::size_of::() * 4 + "ÖWL".len() + "COW".len() + "PIG".len() + "DUCK".len(); - assert!(v.size_of(&mut ops) >= expected_min_allocs); - } - - #[test] - fn test_large_vec() { - const N: usize = 128 * 1024 * 1024; - let val = vec![1u8; N]; - let mut ops = new_malloc_size_ops(); - assert!(val.size_of(&mut ops) >= N); - assert!(val.size_of(&mut ops) < 2 * N); - } - - #[test] - fn btree_set() { - let mut set = BTreeSet::new(); - for t in 0..100 { - set.insert(vec![t]); - } - // ~36 per value - assert!(crate::malloc_size(&set) > 3000); - } - - #[test] - fn special_malloc_size_of_0() { - struct Data

{ - phantom: std::marker::PhantomData

, - } - - malloc_size_of_is_0!(any: Data

); - - // MallocSizeOf is not implemented for [u8; 333] - assert_eq!(crate::malloc_size(&Data::<[u8; 333]> { phantom: std::marker::PhantomData }), 0); - } - - #[test] - fn constant_size() { - struct AlwaysTwo(Vec); - - impl MallocSizeOf for AlwaysTwo { - fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { - self.0.size_of(ops) - } - fn constant_size() -> Option { - Some(2) - } - } - - assert_eq!(AlwaysTwo::constant_size(), Some(2)); - assert_eq!(std::cmp::Reverse::::constant_size(), Some(0)); - assert_eq!(std::cell::RefCell::::constant_size(), Some(0)); - assert_eq!(std::cell::Cell::::constant_size(), Some(0)); - assert_eq!(Result::<(), ()>::constant_size(), Some(0)); - assert_eq!(<(AlwaysTwo, (), [u8; 32], AlwaysTwo)>::constant_size(), Some(2 + 2)); - assert_eq!(Option::::constant_size(), Some(0)); - assert_eq!(<&String>::constant_size(), Some(0)); - - assert_eq!(::constant_size(), None); - assert_eq!(std::borrow::Cow::::constant_size(), None); - assert_eq!(Result::<(), String>::constant_size(), None); - assert_eq!(Option::::constant_size(), None); - } -} diff --git a/parity-util-mem/src/memory_stats_jemalloc.rs b/parity-util-mem/src/memory_stats_jemalloc.rs deleted file mode 100644 index 22081d64c..000000000 --- a/parity-util-mem/src/memory_stats_jemalloc.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use tikv_jemalloc_ctl::Error; -use tikv_jemalloc_ctl::{epoch, stats}; - -#[derive(Clone)] -pub struct MemoryAllocationTracker { - epoch: tikv_jemalloc_ctl::epoch_mib, - allocated: stats::allocated_mib, - resident: stats::resident_mib, -} - -impl MemoryAllocationTracker { - pub fn new() -> Result { - Ok(Self { epoch: epoch::mib()?, allocated: stats::allocated::mib()?, resident: stats::resident::mib()? 
}) - } - - pub fn snapshot(&self) -> Result { - // update stats by advancing the allocation epoch - self.epoch.advance()?; - - let allocated: u64 = self.allocated.read()? as _; - let resident: u64 = self.resident.read()? as _; - Ok(crate::MemoryAllocationSnapshot { allocated, resident }) - } -} diff --git a/parity-util-mem/src/memory_stats_noop.rs b/parity-util-mem/src/memory_stats_noop.rs deleted file mode 100644 index cf077c6f8..000000000 --- a/parity-util-mem/src/memory_stats_noop.rs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[derive(Clone, Debug)] -pub struct Unimplemented; -pub use Unimplemented as Error; - -#[cfg(feature = "std")] -impl std::fmt::Display for Unimplemented { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.write_str("unimplemented") - } -} - -#[derive(Clone)] -pub struct MemoryAllocationTracker {} - -impl MemoryAllocationTracker { - pub fn new() -> Result { - Err(Error) - } - - pub fn snapshot(&self) -> Result { - unimplemented!(); - } -} diff --git a/parity-util-mem/src/primitives_impls.rs b/parity-util-mem/src/primitives_impls.rs deleted file mode 100644 index cf98bc211..000000000 --- a/parity-util-mem/src/primitives_impls.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Implementation of `MallocSize` primitive types. 
- -use primitive_types::{H160, H256, H512, U128, U256, U512}; - -malloc_size_of_is_0!(U128, U256, U512, H160, H256, H512); - -#[cfg(test)] -mod tests { - - use primitive_types::H256; - - #[test] - fn smoky() { - let v = vec![H256::zero(), H256::zero()]; - - assert!(crate::MallocSizeOfExt::malloc_size_of(&v) >= 64); - } -} diff --git a/parity-util-mem/src/sizeof.rs b/parity-util-mem/src/sizeof.rs deleted file mode 100644 index 3d60913e4..000000000 --- a/parity-util-mem/src/sizeof.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Estimation for heapsize calculation. Usable to replace call to allocator method (for some -//! allocators or simply because we just need a deterministic cunsumption measurement). - -use crate::malloc_size::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps, MallocUnconditionalShallowSizeOf}; -#[cfg(not(feature = "std"))] -use alloc::boxed::Box; -#[cfg(not(feature = "std"))] -use alloc::string::String; -#[cfg(not(feature = "std"))] -use alloc::sync::Arc; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -#[cfg(not(feature = "std"))] -use core::mem::{size_of, size_of_val}; - -#[cfg(feature = "std")] -use std::mem::{size_of, size_of_val}; -#[cfg(feature = "std")] -use std::sync::Arc; - -impl MallocShallowSizeOf for Box { - fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - size_of_val(&**self) - } -} - -impl MallocSizeOf for String { - fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - self.capacity() * size_of::() - } -} - -impl MallocShallowSizeOf for Vec { - fn shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - self.capacity() * size_of::() - } -} - -impl MallocUnconditionalShallowSizeOf for Arc { - fn unconditional_shallow_size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { - 
size_of::() - } -} diff --git a/parity-util-mem/tests/derive.rs b/parity-util-mem/tests/derive.rs deleted file mode 100644 index 63825ba61..000000000 --- a/parity-util-mem/tests/derive.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2020 Parity Technologies -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![cfg(feature = "std")] - -use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; - -#[test] -fn derive_vec() { - #[derive(MallocSizeOf)] - struct Trivia { - v: Vec, - } - - let t = Trivia { v: vec![0u8; 1024] }; - - assert!(t.malloc_size_of() > 1000); -} - -#[test] -fn derive_hashmap() { - #[derive(MallocSizeOf, Default)] - struct Trivia { - hm: std::collections::HashMap>, - } - - let mut t = Trivia::default(); - - t.hm.insert(1, vec![0u8; 2048]); - - assert!(t.malloc_size_of() > 2000); -} - -#[test] -fn derive_ignore() { - #[derive(MallocSizeOf, Default)] - struct Trivia { - hm: std::collections::HashMap>, - #[ignore_malloc_size_of = "I don't like vectors"] - v: Vec, - } - - let mut t = Trivia::default(); - - t.hm.insert(1, vec![0u8; 2048]); - t.v = vec![0u8; 1024]; - assert!(t.malloc_size_of() < 3000); -} - -#[test] -#[cfg(all(feature = "lru", feature = "hashbrown"))] -fn derive_morecomplex() { - #[derive(MallocSizeOf)] - struct Trivia { - hm: hashbrown::HashMap>, - cache: lru::LruCache>, - } - - let mut t = Trivia { hm: hashbrown::HashMap::new(), cache: lru::LruCache::unbounded() }; - - t.hm.insert(1, vec![0u8; 2048]); - t.cache.put(1, vec![0u8; 2048]); - t.cache.put(2, vec![0u8; 4096]); - - assert!(t.malloc_size_of() > 8000); -} - -#[test] -fn derive_tuple() { - #[derive(MallocSizeOf)] - struct Trivia { - tp1: (), - tp2: (Vec, Vec), - } - - let t = Trivia { tp1: (), tp2: (vec![7u8; 1024], vec![9u8; 1024]) }; - - assert!(t.malloc_size_of() > 2000); - assert!(t.malloc_size_of() < 3000); -} diff --git 
a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index ea159ade9..06c1901ce 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -10,7 +10,7 @@ rust-version = "1.60.0" [dependencies] fixed-hash = { version = "0.8", path = "../fixed-hash", default-features = false } -uint = { version = "0.9.0", path = "../uint", default-features = false } +uint = { version = "0.9.5", path = "../uint", default-features = false } impl-serde = { version = "0.4.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index bbcd3f02f..4209de17b 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -11,4 +11,4 @@ rust-version = "1.56.1" [dependencies] num-traits = { version = "0.2", default-features = false } integer-sqrt = "0.1" -uint = { version = "0.9.1", path = "../../../uint", default-features = false } +uint = { version = "0.9.5", path = "../../../uint", default-features = false } diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 91915edf9..b572a2806 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -19,7 +19,7 @@ serde = { version = "1.0.101", default-features = false, features = ["alloc"] } criterion = "0.3.0" serde_derive = "1.0.101" serde_json = "1.0.41" -uint = { version = "0.9.0", path = "../../../uint" } +uint = { version = "0.9.5", path = "../../../uint" } [[bench]] name = "impl_serde" diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 8e4690719..103758218 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a 
Changelog]. ## [Unreleased] +## [0.9.5] - 2022-11-29 +- Implemented bitwise assign traits. [#690](https://github.com/paritytech/parity-common/pull/690) + ## [0.9.4] - 2022-09-20 - Made `one` const. [#650](https://github.com/paritytech/parity-common/pull/650) - Made `max_value` const. [#652](https://github.com/paritytech/parity-common/pull/652) diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 0e4ed8aea..b1d2e8752 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.9.4" +version = "0.9.5" authors = ["Parity Technologies "] readme = "README.md" edition = "2021" From 78ec931469005b380967135817975fcd78eaf268 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Jan 2023 11:39:45 +0100 Subject: [PATCH 297/359] build(deps): update sysinfo requirement from 0.26.0 to 0.27.5 (#706) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. - [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 5e920fee5..0e7839fd3 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -39,6 +39,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.26.0" +sysinfo = "0.27.5" ctrlc = "3.1.4" chrono = "0.4" From 8ed1307ce432cc330f54b8ff195bed2724c5fe24 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 26 Jan 2023 11:56:38 +0900 Subject: [PATCH 298/359] Add bounded-collections crate (#708) * Add bounded-collections crate This new crate contains the bounded types that we see in Substrate, and traits that are used to support the implementation of such types. * cargo fmt * Fixes * Only run tests when std is enabled * Rename bounded-collections to core-types * Rename core-types back to bounded-collections * Fixes --- Cargo.toml | 1 + bounded-collections/Cargo.toml | 22 + bounded-collections/src/bounded_btree_map.rs | 618 +++++++++ bounded-collections/src/bounded_btree_set.rs | 479 +++++++ bounded-collections/src/bounded_vec.rs | 1268 ++++++++++++++++++ bounded-collections/src/lib.rs | 263 ++++ bounded-collections/src/weak_bounded_vec.rs | 518 +++++++ 7 files changed, 3169 insertions(+) create mode 100644 bounded-collections/Cargo.toml create mode 100644 bounded-collections/src/bounded_btree_map.rs create mode 100644 bounded-collections/src/bounded_btree_set.rs create mode 100644 bounded-collections/src/bounded_vec.rs create mode 100644 bounded-collections/src/lib.rs create mode 100644 bounded-collections/src/weak_bounded_vec.rs diff --git a/Cargo.toml b/Cargo.toml index 60011793a..c2a4432d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ 
"rlp-derive", "uint", "primitive-types", + "bounded-collections", "ethereum-types", "ethbloom", ] diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml new file mode 100644 index 000000000..bcedae9eb --- /dev/null +++ b/bounded-collections/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "bounded-collections" +version = "0.1.0" +authors = ["Parity Technologies "] +license = "MIT OR Apache-2.0" +homepage = "https://github.com/paritytech/parity-common" +description = "Bounded types and their supporting traits used in Substrate" +edition = "2021" +rust-version = "1.60.0" + +[dependencies] +serde = { version = "1.0.101", default-features = false } +codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } +scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } +log = { version = "0.4.17", default-features = false } + +[dev-dependencies] +serde_json = "1.0.41" + +[features] +default = ["std"] +std = ["serde/std", "serde/derive"] diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs new file mode 100644 index 000000000..3d064fa73 --- /dev/null +++ b/bounded-collections/src/bounded_btree_map.rs @@ -0,0 +1,618 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Traits, types and structs to support a bounded BTreeMap. + +use crate::{Get, TryCollect}; +use alloc::collections::BTreeMap; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; + +/// A bounded map based on a B-Tree. +/// +/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing +/// the amount of work performed in a search. See [`BTreeMap`] for more details. +/// +/// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the +/// map. All internal operations ensure this bound is respected. +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedBTreeMap(BTreeMap, PhantomData); + +impl Decode for BoundedBTreeMap +where + K: Decode + Ord, + V: Decode, + S: Get, +{ + fn decode(input: &mut I) -> Result { + let inner = BTreeMap::::decode(input)?; + if inner.len() > S::get() as usize { + return Err("BoundedBTreeMap exceeds its limit".into()) + } + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + BTreeMap::::skip(input) + } +} + +impl BoundedBTreeMap +where + S: Get, +{ + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } +} + +impl BoundedBTreeMap +where + K: Ord, + S: Get, +{ + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: BTreeMap) -> Self { + Self(t, Default::default()) + } + + /// Exactly the same semantics as `BTreeMap::retain`. + /// + /// The is a safe `&mut self` borrow because `retain` can only ever decrease the length of the + /// inner map. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Create a new `BoundedBTreeMap`. + /// + /// Does not allocate. + pub fn new() -> Self { + BoundedBTreeMap(BTreeMap::new(), PhantomData) + } + + /// Consume self, and return the inner `BTreeMap`. 
+ /// + /// This is useful when a mutating API of the inner type is desired, and closure-based mutation + /// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient. + pub fn into_inner(self) -> BTreeMap { + debug_assert!(self.0.len() <= Self::bound()); + self.0 + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeMap)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Clears the map, removing all elements. + pub fn clear(&mut self) { + self.0.clear() + } + + /// Return a mutable reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, but the ordering on the borrowed + /// form _must_ match the ordering on the key type. + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.get_mut(key) + } + + /// Exactly the same semantics as [`BTreeMap::insert`], but returns an `Err` (and is a noop) if + /// the new length of the map exceeds `S`. + /// + /// In the `Err` case, returns the inserted pair so it can be further used without cloning. + pub fn try_insert(&mut self, key: K, value: V) -> Result, (K, V)> { + if self.len() < Self::bound() || self.0.contains_key(&key) { + Ok(self.0.insert(key, value)) + } else { + Err((key, value)) + } + } + + /// Remove a key from the map, returning the value at the key if the key was previously in the + /// map. + /// + /// The key may be any borrowed form of the map's key type, but the ordering on the borrowed + /// form _must_ match the ordering on the key type. 
+ pub fn remove(&mut self, key: &Q) -> Option + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.remove(key) + } + + /// Remove a key from the map, returning the value at the key if the key was previously in the + /// map. + /// + /// The key may be any borrowed form of the map's key type, but the ordering on the borrowed + /// form _must_ match the ordering on the key type. + pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Ord + ?Sized, + { + self.0.remove_entry(key) + } + + /// Gets a mutable iterator over the entries of the map, sorted by key. + /// + /// See [`BTreeMap::iter_mut`] for more information. + pub fn iter_mut(&mut self) -> alloc::collections::btree_map::IterMut { + self.0.iter_mut() + } + + /// Consume the map, applying `f` to each of it's values and returning a new map. + pub fn map(self, mut f: F) -> BoundedBTreeMap + where + F: FnMut((&K, V)) -> T, + { + BoundedBTreeMap::::unchecked_from( + self.0 + .into_iter() + .map(|(k, v)| { + let t = f((&k, v)); + (k, t) + }) + .collect(), + ) + } + + /// Consume the map, applying `f` to each of it's values as long as it returns successfully. If + /// an `Err(E)` is ever encountered, the mapping is short circuited and the error is returned; + /// otherwise, a new map is returned in the contained `Ok` value. 
+ pub fn try_map(self, mut f: F) -> Result, E> + where + F: FnMut((&K, V)) -> Result, + { + Ok(BoundedBTreeMap::::unchecked_from( + self.0 + .into_iter() + .map(|(k, v)| (f((&k, v)).map(|t| (k, t)))) + .collect::, _>>()?, + )) + } +} + +impl Default for BoundedBTreeMap +where + K: Ord, + S: Get, +{ + fn default() -> Self { + Self::new() + } +} + +impl Clone for BoundedBTreeMap +where + BTreeMap: Clone, +{ + fn clone(&self) -> Self { + BoundedBTreeMap(self.0.clone(), PhantomData) + } +} + +impl core::fmt::Debug for BoundedBTreeMap +where + BTreeMap: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedBTreeMap").field(&self.0).field(&Self::bound()).finish() + } +} + +impl PartialEq> for BoundedBTreeMap +where + BTreeMap: PartialEq, + S1: Get, + S2: Get, +{ + fn eq(&self, other: &BoundedBTreeMap) -> bool { + S1::get() == S2::get() && self.0 == other.0 + } +} + +impl Eq for BoundedBTreeMap +where + BTreeMap: Eq, + S: Get, +{ +} + +impl PartialEq> for BoundedBTreeMap +where + BTreeMap: PartialEq, +{ + fn eq(&self, other: &BTreeMap) -> bool { + self.0 == *other + } +} + +impl PartialOrd for BoundedBTreeMap +where + BTreeMap: PartialOrd, + S: Get, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for BoundedBTreeMap +where + BTreeMap: Ord, + S: Get, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl IntoIterator for BoundedBTreeMap { + type Item = (K, V); + type IntoIter = alloc::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a BoundedBTreeMap { + type Item = (&'a K, &'a V); + type IntoIter = alloc::collections::btree_map::Iter<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut BoundedBTreeMap { + type Item = (&'a K, &'a mut V); + 
type IntoIter = alloc::collections::btree_map::IterMut<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl MaxEncodedLen for BoundedBTreeMap +where + K: MaxEncodedLen, + V: MaxEncodedLen, + S: Get, +{ + fn max_encoded_len() -> usize { + Self::bound() + .saturating_mul(K::max_encoded_len().saturating_add(V::max_encoded_len())) + .saturating_add(codec::Compact(S::get()).encoded_size()) + } +} + +impl Deref for BoundedBTreeMap +where + K: Ord, +{ + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef> for BoundedBTreeMap +where + K: Ord, +{ + fn as_ref(&self) -> &BTreeMap { + &self.0 + } +} + +impl From> for BTreeMap +where + K: Ord, +{ + fn from(map: BoundedBTreeMap) -> Self { + map.0 + } +} + +impl TryFrom> for BoundedBTreeMap +where + K: Ord, + S: Get, +{ + type Error = (); + + fn try_from(value: BTreeMap) -> Result { + (value.len() <= Self::bound()) + .then(move || BoundedBTreeMap(value, PhantomData)) + .ok_or(()) + } +} + +impl codec::DecodeLength for BoundedBTreeMap { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedBTreeMap` is stored just a `BTreeMap`, which is stored as a + // `Compact` with its length followed by an iteration of its items. We can just use + // the underlying implementation. 
+ as codec::DecodeLength>::len(self_encoded) + } +} + +impl codec::EncodeLike> for BoundedBTreeMap where BTreeMap: Encode {} + +impl TryCollect> for I +where + K: Ord, + I: ExactSizeIterator + Iterator, + Bound: Get, +{ + type Error = &'static str; + + fn try_collect(self) -> Result, Self::Error> { + if self.len() > Bound::get() as usize { + Err("iterator length too big") + } else { + Ok(BoundedBTreeMap::::unchecked_from(self.collect::>())) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ConstU32; + + fn map_from_keys(keys: &[K]) -> BTreeMap + where + K: Ord + Copy, + { + keys.iter().copied().zip(std::iter::repeat(())).collect() + } + + fn boundedmap_from_keys(keys: &[K]) -> BoundedBTreeMap + where + K: Ord + Copy, + S: Get, + { + map_from_keys(keys).try_into().unwrap() + } + + #[test] + fn try_insert_works() { + let mut bounded = boundedmap_from_keys::>(&[1, 2, 3]); + bounded.try_insert(0, ()).unwrap(); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.try_insert(9, ()).is_err()); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + } + + #[test] + fn deref_coercion_works() { + let bounded = boundedmap_from_keys::>(&[1, 2, 3]); + // these methods come from deref-ed vec. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let bounded = bounded + .try_mutate(|v| { + v.insert(7, ()); + }) + .unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded + .try_mutate(|v| { + v.insert(8, ()); + }) + .is_none()); + } + + #[test] + fn btree_map_eq_works() { + let bounded = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + assert_eq!(bounded, map_from_keys(&[1, 2, 3, 4, 5, 6])); + } + + #[test] + fn too_big_fail_to_decode() { + let v: Vec<(u32, u32)> = vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]; + assert_eq!( + BoundedBTreeMap::>::decode(&mut &v.encode()[..]), + Err("BoundedBTreeMap exceeds its limit".into()), + ); + } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut map = BoundedBTreeMap::>::new(); + + // when the set is full + + for i in 0..4 { + map.try_insert(Unequal(i, false), i).unwrap(); + } + + // can't insert a new distinct member + map.try_insert(Unequal(5, false), 5).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed, but the value is + map.try_insert(Unequal(0, true), 6).unwrap(); + assert_eq!(map.len(), 4); + let (zero_key, zero_value) = map.get_key_value(&Unequal(0, true)).unwrap(); + assert_eq!(zero_key.0, 0); + assert_eq!(zero_key.1, false); + assert_eq!(*zero_value, 6); + } + + #[test] + 
fn eq_works() { + // of same type + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = boundedmap_from_keys::>(&[1, 2]); + assert_eq!(b1, b2); + + // of different type, but same value and bound. + crate::parameter_types! { + B1: u32 = 7; + B2: u32 = 7; + } + let b1 = boundedmap_from_keys::(&[1, 2]); + let b2 = boundedmap_from_keys::(&[1, 2]); + assert_eq!(b1, b2); + } + + #[test] + fn can_be_collected() { + let b1 = boundedmap_from_keys::>(&[1, 2, 3, 4]); + let b2: BoundedBTreeMap> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect().unwrap(); + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3, 4, 5]); + + // can also be collected into a collection of length 4. + let b2: BoundedBTreeMap> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect().unwrap(); + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3, 4, 5]); + + // can be mutated further into iterators that are `ExactSizedIterator`. + let b2: BoundedBTreeMap> = + b1.iter().map(|(k, v)| (k + 1, *v)).rev().skip(2).try_collect().unwrap(); + // note that the binary tree will re-sort this, so rev() is not really seen + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); + + let b2: BoundedBTreeMap> = + b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap(); + assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); + + // but these won't work + let b2: Result>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect(); + assert!(b2.is_err()); + + let b2: Result>, _> = + b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect(); + assert!(b2.is_err()); + } + + #[test] + fn test_iter_mut() { + let mut b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + + b1.iter_mut().for_each(|(_, v)| *v *= 2); + + assert_eq!(b1, b2); + } + + #[test] + fn map_retains_size() { + let b1 = boundedmap_from_keys::>(&[1, 2]); + let 
b2 = b1.clone(); + + assert_eq!(b1.len(), b2.map(|(_, _)| 5_u32).len()); + } + + #[test] + fn map_maps_properly() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(b1, b2.map(|(_, v)| v * 2)); + } + + #[test] + fn try_map_retains_size() { + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = b1.clone(); + + assert_eq!(b1.len(), b2.try_map::<_, (), _>(|(_, _)| Ok(5_u32)).unwrap().len()); + } + + #[test] + fn try_map_maps_properly() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(b1, b2.try_map::<_, (), _>(|(_, v)| Ok(v * 2)).unwrap()); + } + + #[test] + fn try_map_short_circuit() { + let b1: BoundedBTreeMap> = [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(Err("overflow"), b1.try_map(|(_, v)| v.checked_mul(100).ok_or("overflow"))); + } + + #[test] + fn try_map_ok() { + let b1: BoundedBTreeMap> = [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, (k as u16) * 100)).try_collect().unwrap(); + + assert_eq!(Ok(b2), b1.try_map(|(_, v)| (v as u16).checked_mul(100_u16).ok_or("overflow"))); + } +} diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs new file mode 100644 index 000000000..f324316c8 --- /dev/null +++ b/bounded-collections/src/bounded_btree_set.rs @@ -0,0 +1,479 @@ +// This file is part of Substrate. + +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support a bounded `BTreeSet`. + +use crate::{Get, TryCollect}; +use alloc::collections::BTreeSet; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; + +/// A bounded set based on a B-Tree. +/// +/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing +/// the amount of work performed in a search. See [`BTreeSet`] for more details. +/// +/// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the +/// set. All internal operations ensure this bound is respected. +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedBTreeSet(BTreeSet, PhantomData); + +impl Decode for BoundedBTreeSet +where + T: Decode + Ord, + S: Get, +{ + fn decode(input: &mut I) -> Result { + let inner = BTreeSet::::decode(input)?; + if inner.len() > S::get() as usize { + return Err("BoundedBTreeSet exceeds its limit".into()) + } + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + BTreeSet::::skip(input) + } +} + +impl BoundedBTreeSet +where + S: Get, +{ + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } +} + +impl BoundedBTreeSet +where + T: Ord, + S: Get, +{ + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: BTreeSet) -> Self { + Self(t, Default::default()) + } + + /// Create a new `BoundedBTreeSet`. + /// + /// Does not allocate. 
+ pub fn new() -> Self { + BoundedBTreeSet(BTreeSet::new(), PhantomData) + } + + /// Consume self, and return the inner `BTreeSet`. + /// + /// This is useful when a mutating API of the inner type is desired, and closure-based mutation + /// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient. + pub fn into_inner(self) -> BTreeSet { + debug_assert!(self.0.len() <= Self::bound()); + self.0 + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeSet)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Clears the set, removing all elements. + pub fn clear(&mut self) { + self.0.clear() + } + + /// Exactly the same semantics as [`BTreeSet::insert`], but returns an `Err` (and is a noop) if + /// the new length of the set exceeds `S`. + /// + /// In the `Err` case, returns the inserted item so it can be further used without cloning. + pub fn try_insert(&mut self, item: T) -> Result { + if self.len() < Self::bound() || self.0.contains(&item) { + Ok(self.0.insert(item)) + } else { + Err(item) + } + } + + /// Remove an item from the set, returning whether it was previously in the set. + /// + /// The item may be any borrowed form of the set's item type, but the ordering on the borrowed + /// form _must_ match the ordering on the item type. + pub fn remove(&mut self, item: &Q) -> bool + where + T: Borrow, + Q: Ord + ?Sized, + { + self.0.remove(item) + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// The value may be any borrowed form of the set's value type, but the ordering on the borrowed + /// form _must_ match the ordering on the value type. 
+ pub fn take(&mut self, value: &Q) -> Option + where + T: Borrow + Ord, + Q: Ord + ?Sized, + { + self.0.take(value) + } +} + +impl Default for BoundedBTreeSet +where + T: Ord, + S: Get, +{ + fn default() -> Self { + Self::new() + } +} + +impl Clone for BoundedBTreeSet +where + BTreeSet: Clone, +{ + fn clone(&self) -> Self { + BoundedBTreeSet(self.0.clone(), PhantomData) + } +} + +impl core::fmt::Debug for BoundedBTreeSet +where + BTreeSet: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedBTreeSet").field(&self.0).field(&Self::bound()).finish() + } +} + +impl PartialEq> for BoundedBTreeSet +where + BTreeSet: PartialEq, + S1: Get, + S2: Get, +{ + fn eq(&self, other: &BoundedBTreeSet) -> bool { + S1::get() == S2::get() && self.0 == other.0 + } +} + +impl Eq for BoundedBTreeSet +where + BTreeSet: Eq, + S: Get, +{ +} + +impl PartialEq> for BoundedBTreeSet +where + BTreeSet: PartialEq, + S: Get, +{ + fn eq(&self, other: &BTreeSet) -> bool { + self.0 == *other + } +} + +impl PartialOrd for BoundedBTreeSet +where + BTreeSet: PartialOrd, + S: Get, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Ord for BoundedBTreeSet +where + BTreeSet: Ord, + S: Get, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl IntoIterator for BoundedBTreeSet { + type Item = T; + type IntoIter = alloc::collections::btree_set::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> IntoIterator for &'a BoundedBTreeSet { + type Item = &'a T; + type IntoIter = alloc::collections::btree_set::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl MaxEncodedLen for BoundedBTreeSet +where + T: MaxEncodedLen, + S: Get, +{ + fn max_encoded_len() -> usize { + Self::bound() + .saturating_mul(T::max_encoded_len()) + 
.saturating_add(codec::Compact(S::get()).encoded_size()) + } +} + +impl Deref for BoundedBTreeSet +where + T: Ord, +{ + type Target = BTreeSet; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef> for BoundedBTreeSet +where + T: Ord, +{ + fn as_ref(&self) -> &BTreeSet { + &self.0 + } +} + +impl From> for BTreeSet +where + T: Ord, +{ + fn from(set: BoundedBTreeSet) -> Self { + set.0 + } +} + +impl TryFrom> for BoundedBTreeSet +where + T: Ord, + S: Get, +{ + type Error = (); + + fn try_from(value: BTreeSet) -> Result { + (value.len() <= Self::bound()) + .then(move || BoundedBTreeSet(value, PhantomData)) + .ok_or(()) + } +} + +impl codec::DecodeLength for BoundedBTreeSet { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedBTreeSet` is stored just a `BTreeSet`, which is stored as a + // `Compact` with its length followed by an iteration of its items. We can just use + // the underlying implementation. + as codec::DecodeLength>::len(self_encoded) + } +} + +impl codec::EncodeLike> for BoundedBTreeSet where BTreeSet: Encode {} + +impl TryCollect> for I +where + T: Ord, + I: ExactSizeIterator + Iterator, + Bound: Get, +{ + type Error = &'static str; + + fn try_collect(self) -> Result, Self::Error> { + if self.len() > Bound::get() as usize { + Err("iterator length too big") + } else { + Ok(BoundedBTreeSet::::unchecked_from(self.collect::>())) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::ConstU32; + + fn set_from_keys(keys: &[T]) -> BTreeSet + where + T: Ord + Copy, + { + keys.iter().copied().collect() + } + + fn boundedset_from_keys(keys: &[T]) -> BoundedBTreeSet + where + T: Ord + Copy, + S: Get, + { + set_from_keys(keys).try_into().unwrap() + } + + #[test] + fn try_insert_works() { + let mut bounded = boundedset_from_keys::>(&[1, 2, 3]); + bounded.try_insert(0).unwrap(); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.try_insert(9).is_err()); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + } + 
+ #[test] + fn deref_coercion_works() { + let bounded = boundedset_from_keys::>(&[1, 2, 3]); + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let bounded = bounded + .try_mutate(|v| { + v.insert(7); + }) + .unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded + .try_mutate(|v| { + v.insert(8); + }) + .is_none()); + } + + #[test] + fn btree_map_eq_works() { + let bounded = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + assert_eq!(bounded, set_from_keys(&[1, 2, 3, 4, 5, 6])); + } + + #[test] + fn too_big_fail_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + assert_eq!( + BoundedBTreeSet::>::decode(&mut &v.encode()[..]), + Err("BoundedBTreeSet exceeds its limit".into()), + ); + } + + #[test] + fn unequal_eq_impl_insert_works() { + // given a struct with a strange notion of equality + #[derive(Debug)] + struct Unequal(u32, bool); + + impl PartialEq for Unequal { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + impl Eq for Unequal {} + + impl Ord for Unequal { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl PartialOrd for Unequal { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + let mut set = BoundedBTreeSet::>::new(); + + // when the set is full + + for i in 0..4 { + set.try_insert(Unequal(i, false)).unwrap(); + } + + // can't insert a new distinct member + set.try_insert(Unequal(5, false)).unwrap_err(); + + // but _can_ insert a distinct member which compares equal, though per the documentation, + // neither the set length nor the actual member are changed + set.try_insert(Unequal(0, true)).unwrap(); + assert_eq!(set.len(), 4); + let zero_item = set.get(&Unequal(0, true)).unwrap(); + assert_eq!(zero_item.0, 0); + assert_eq!(zero_item.1, false); + } + 
+ #[test]
+ fn eq_works() {
+ // of same type
+ let b1 = boundedset_from_keys::>(&[1, 2]);
+ let b2 = boundedset_from_keys::>(&[1, 2]);
+ assert_eq!(b1, b2);
+
+ // of different type, but same value and bound.
+ crate::parameter_types! {
+ B1: u32 = 7;
+ B2: u32 = 7;
+ }
+ let b1 = boundedset_from_keys::(&[1, 2]);
+ let b2 = boundedset_from_keys::(&[1, 2]);
+ assert_eq!(b1, b2);
+ }
+
+ #[test]
+ fn can_be_collected() {
+ let b1 = boundedset_from_keys::>(&[1, 2, 3, 4]);
+ let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).try_collect().unwrap();
+ assert_eq!(b2.into_iter().collect::>(), vec![2, 3, 4, 5]);
+
+ // can also be collected into a collection of length 4.
+ let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).try_collect().unwrap();
+ assert_eq!(b2.into_iter().collect::>(), vec![2, 3, 4, 5]);
+
+ // can be mutated further into iterators that are `ExactSizedIterator`.
+ let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).rev().skip(2).try_collect().unwrap();
+ // note that the binary tree will re-sort this, so rev() is not really seen
+ assert_eq!(b2.into_iter().collect::>(), vec![2, 3]);
+
+ let b2: BoundedBTreeSet> = b1.iter().map(|k| k + 1).take(2).try_collect().unwrap();
+ assert_eq!(b2.into_iter().collect::>(), vec![2, 3]);
+
+ // but these won't work
+ let b2: Result>, _> = b1.iter().map(|k| k + 1).try_collect();
+ assert!(b2.is_err());
+
+ let b2: Result>, _> = b1.iter().map(|k| k + 1).skip(2).try_collect();
+ assert!(b2.is_err());
+ }
+}
diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs
new file mode 100644
index 000000000..788008b9a
--- /dev/null
+++ b/bounded-collections/src/bounded_vec.rs
@@ -0,0 +1,1268 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map +//! or a double map. + +use super::WeakBoundedVec; +use crate::{Get, TryCollect}; +use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use core::{ + marker::PhantomData, + ops::{Deref, Index, IndexMut, RangeBounds}, + slice::SliceIndex, +}; +#[cfg(feature = "std")] +use serde::{ + de::{Error, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; + +/// A bounded vector. +/// +/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once +/// put into storage as a raw value, map or double-map. +/// +/// As the name suggests, the length of the queue is always bounded. All internal operations ensure +/// this bound is respected. +#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData); + +/// Create an object through truncation. +pub trait TruncateFrom { + /// Create an object through truncation. 
+ fn truncate_from(unbound: T) -> Self; +} + +#[cfg(feature = "std")] +impl<'de, T, S: Get> Deserialize<'de> for BoundedVec +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VecVisitor>(PhantomData<(T, S)>); + + impl<'de, T, S: Get> Visitor<'de> for VecVisitor + where + T: Deserialize<'de>, + { + type Value = Vec; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + Err(A::Error::custom("out of bounds")) + } else { + let mut values = Vec::with_capacity(size); + + while let Some(value) = seq.next_element()? { + values.push(value); + if values.len() > max { + return Err(A::Error::custom("out of bounds")) + } + } + + Ok(values) + } + } + } + + let visitor: VecVisitor = VecVisitor(PhantomData); + deserializer + .deserialize_seq(visitor) + .map(|v| BoundedVec::::try_from(v).map_err(|_| Error::custom("out of bounds")))? + } +} + +/// A bounded slice. +/// +/// Similar to a `BoundedVec`, but not owned and cannot be decoded. 
+#[derive(Encode)] +pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); + +// This can be replaced with +// #[derive(scale_info::TypeInfo)] +// #[scale_info(skip_type_params(S))] +// again once this issue is fixed in the rust compiler: https://github.com/rust-lang/rust/issues/96956 +// Tracking issues: https://github.com/paritytech/substrate/issues/11915 +impl<'a, T, S> scale_info::TypeInfo for BoundedSlice<'a, T, S> +where + &'a [T]: scale_info::TypeInfo + 'static, + PhantomData: scale_info::TypeInfo + 'static, + T: scale_info::TypeInfo + 'static, + S: 'static, +{ + type Identity = Self; + + fn type_info() -> ::scale_info::Type { + scale_info::Type::builder() + .path(scale_info::Path::new("BoundedSlice", "sp_runtime::bounded::bounded_vec")) + .type_params(<[_]>::into_vec(Box::new([ + scale_info::TypeParameter::new("T", core::option::Option::Some(::scale_info::meta_type::())), + scale_info::TypeParameter::new("S", ::core::option::Option::None), + ]))) + .docs(&["A bounded slice.", "", "Similar to a `BoundedVec`, but not owned and cannot be decoded."]) + .composite( + scale_info::build::Fields::unnamed() + .field(|f| f.ty::<&'static [T]>().type_name("&'static[T]").docs(&[])) + .field(|f| f.ty::>().type_name("PhantomData").docs(&[])), + ) + } +} + +// `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, +// `WeakBoundedVec`, or a `Vec`. 
+impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} +impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> bool { + self.0 == other.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, other: &BoundedVec) -> bool { + self.0 == other.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, other: &WeakBoundedVec) -> bool { + self.0 == other.0 + } +} + +impl<'a, T, S: Get> Eq for BoundedSlice<'a, T, S> where T: Eq {} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { + self.0.partial_cmp(other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedVec) -> Option { + self.0.partial_cmp(&*other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedSlice<'a, T, BoundSelf> +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &WeakBoundedVec) -> Option { + self.0.partial_cmp(&*other.0) + } +} + +impl<'a, T: Ord, Bound: Get> Ord for BoundedSlice<'a, T, Bound> { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl<'a, T, S: Get> TryFrom<&'a [T]> for BoundedSlice<'a, T, S> { + type Error = &'a [T]; + fn try_from(t: &'a [T]) -> Result 
{ + if t.len() <= S::get() as usize { + Ok(BoundedSlice(t, PhantomData)) + } else { + Err(t) + } + } +} + +impl<'a, T, S> From> for &'a [T] { + fn from(t: BoundedSlice<'a, T, S>) -> Self { + t.0 + } +} + +impl<'a, T, S: Get> TruncateFrom<&'a [T]> for BoundedSlice<'a, T, S> { + fn truncate_from(unbound: &'a [T]) -> Self { + BoundedSlice::::truncate_from(unbound) + } +} + +impl<'a, T, S> Clone for BoundedSlice<'a, T, S> { + fn clone(&self) -> Self { + BoundedSlice(self.0, PhantomData) + } +} + +impl<'a, T, S> core::fmt::Debug for BoundedSlice<'a, T, S> +where + &'a [T]: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedSlice").field(&self.0).field(&S::get()).finish() + } +} + +// Since a reference `&T` is always `Copy`, so is `BoundedSlice<'a, T, S>`. +impl<'a, T, S> Copy for BoundedSlice<'a, T, S> {} + +// will allow for all immutable operations of `[T]` on `BoundedSlice`. +impl<'a, T, S> Deref for BoundedSlice<'a, T, S> { + type Target = [T]; + + fn deref(&self) -> &Self::Target { + self.0 + } +} + +impl<'a, T, S> core::iter::IntoIterator for BoundedSlice<'a, T, S> { + type Item = &'a T; + type IntoIter = core::slice::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, T, S: Get> BoundedSlice<'a, T, S> { + /// Create an instance from the first elements of the given slice (or all of it if it is smaller + /// than the length bound). + pub fn truncate_from(s: &'a [T]) -> Self { + Self(&s[0..(s.len().min(S::get() as usize))], PhantomData) + } +} + +impl> Decode for BoundedVec { + fn decode(input: &mut I) -> Result { + let inner = Vec::::decode(input)?; + if inner.len() > S::get() as usize { + return Err("BoundedVec exceeds its limit".into()) + } + Ok(Self(inner, PhantomData)) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + Vec::::skip(input) + } +} + +// `BoundedVec`s encode to something which will always decode as a `Vec`. 
+impl> EncodeLike> for BoundedVec {} + +impl BoundedVec { + /// Create `Self` from `t` without any checks. + fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `BoundedVec`. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Exactly the same semantics as [`slice::sort_by`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort_by(&mut self, compare: F) + where + F: FnMut(&T, &T) -> core::cmp::Ordering, + { + self.0.sort_by(compare) + } + + /// Exactly the same semantics as [`slice::sort_by_key`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort_by_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: core::cmp::Ord, + { + self.0.sort_by_key(f) + } + + /// Exactly the same semantics as [`slice::sort`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort(&mut self) + where + T: core::cmp::Ord, + { + self.0.sort() + } + + /// Exactly the same semantics as `Vec::remove`. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Exactly the same semantics as `slice::swap_remove`. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Exactly the same semantics as `Vec::retain`. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Exactly the same semantics as `slice::get_mut`. 
+ pub fn get_mut>(&mut self, index: I) -> Option<&mut >::Output> {
+ self.0.get_mut(index)
+ }
+
+ /// Exactly the same semantics as `Vec::truncate`.
+ ///
+ /// This is safe because `truncate` can never increase the length of the internal vector.
+ pub fn truncate(&mut self, s: usize) {
+ self.0.truncate(s);
+ }
+
+ /// Exactly the same semantics as `Vec::pop`.
+ ///
+ /// This is safe since popping can only shrink the inner vector.
+ pub fn pop(&mut self) -> Option {
+ self.0.pop()
+ }
+
+ /// Exactly the same semantics as [`slice::iter_mut`].
+ pub fn iter_mut(&mut self) -> core::slice::IterMut<'_, T> {
+ self.0.iter_mut()
+ }
+
+ /// Exactly the same semantics as [`slice::last_mut`].
+ pub fn last_mut(&mut self) -> Option<&mut T> {
+ self.0.last_mut()
+ }
+
+ /// Exact same semantics as [`Vec::drain`].
+ pub fn drain(&mut self, range: R) -> alloc::vec::Drain<'_, T>
+ where
+ R: RangeBounds,
+ {
+ self.0.drain(range)
+ }
+}
+
+impl> From> for Vec {
+ fn from(x: BoundedVec) -> Vec {
+ x.0
+ }
+}
+
+impl> BoundedVec {
+ /// Pre-allocate `capacity` items in self.
+ ///
+ /// If `capacity` is greater than [`Self::bound`], then the minimum of the two is used.
+ pub fn with_bounded_capacity(capacity: usize) -> Self {
+ let capacity = capacity.min(Self::bound());
+ Self(Vec::with_capacity(capacity), Default::default())
+ }
+
+ /// Allocate self with the maximum possible capacity.
+ pub fn with_max_capacity() -> Self {
+ Self::with_bounded_capacity(Self::bound())
+ }
+
+ /// Consume and truncate the vector `v` in order to create a new instance of `Self` from it.
+ pub fn truncate_from(mut v: Vec) -> Self {
+ v.truncate(Self::bound());
+ Self::unchecked_from(v)
+ }
+
+ /// Get the bound of the type in `usize`.
+ pub fn bound() -> usize {
+ S::get() as usize
+ }
+
+ /// Returns true if this collection is full.
+ pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } + + /// Forces the insertion of `element` into `self` retaining all items with index at least + /// `index`. + /// + /// If `index == 0` and `self.len() == Self::bound()`, then this is a no-op. + /// + /// If `Self::bound() < index` or `self.len() < index`, then this is also a no-op. + /// + /// Returns `Ok(maybe_removed)` if the item was inserted, where `maybe_removed` is + /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(())` if + /// `element` cannot be inserted. + pub fn force_insert_keep_right(&mut self, index: usize, mut element: T) -> Result, ()> { + // Check against panics. + if Self::bound() < index || self.len() < index { + Err(()) + } else if self.len() < Self::bound() { + // Cannot panic since self.len() >= index; + self.0.insert(index, element); + Ok(None) + } else { + if index == 0 { + return Err(()) + } + core::mem::swap(&mut self[0], &mut element); + // `[0..index] cannot panic since self.len() >= index. + // `rotate_left(1)` cannot panic because there is at least 1 element. + self[0..index].rotate_left(1); + Ok(Some(element)) + } + } + + /// Forces the insertion of `element` into `self` retaining all items with index at most + /// `index`. + /// + /// If `index == Self::bound()` and `self.len() == Self::bound()`, then this is a no-op. + /// + /// If `Self::bound() < index` or `self.len() < index`, then this is also a no-op. + /// + /// Returns `Ok(maybe_removed)` if the item was inserted, where `maybe_removed` is + /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(())` if + /// `element` cannot be inserted. + pub fn force_insert_keep_left(&mut self, index: usize, element: T) -> Result, ()> { + // Check against panics. + if Self::bound() < index || self.len() < index || Self::bound() == 0 { + return Err(()) + } + // Noop condition. 
+ if Self::bound() == index && self.len() <= Self::bound() {
+ return Err(())
+ }
+ let maybe_removed = if self.is_full() {
+ // defensive-only: since we are at capacity, this is a noop.
+ self.0.truncate(Self::bound());
+ // if we truncate anything, it will be the last one.
+ self.0.pop()
+ } else {
+ None
+ };
+
+ // Cannot panic since `self.len() >= index`;
+ self.0.insert(index, element);
+ Ok(maybe_removed)
+ }
+
+ /// Move the position of an item from one location to another in the slice.
+ ///
+ /// Except for the item being moved, the order of the slice remains the same.
+ ///
+ /// - `index` is the location of the item to be moved.
+ /// - `insert_position` is the index of the item in the slice which should *immediately follow*
+ /// the item which is being moved.
+ ///
+ /// Returns `true` if the operation was successful, otherwise `false` if a noop.
+ pub fn slide(&mut self, index: usize, insert_position: usize) -> bool {
+ // Check against panics.
+ if self.len() <= index || self.len() < insert_position || index == usize::MAX {
+ return false
+ }
+ // Noop conditions.
+ if index == insert_position || index + 1 == insert_position {
+ return false
+ }
+ if insert_position < index && index < self.len() {
+ // --- --- --- === === === === @@@ --- --- ---
+ // ^-- N ^O^
+ // ...
+ // /-----<<<-----\
+ // --- --- --- === === === === @@@ --- --- ---
+ // >>> >>> >>> >>>
+ // ...
+ // --- --- --- @@@ === === === === --- --- ---
+ // ^N^
+ self[insert_position..index + 1].rotate_right(1);
+ return true
+ } else if insert_position > 0 && index + 1 < insert_position {
+ // Note that the apparent asymmetry of these two branches is due to the
+ // fact that the "new" position is the position to be inserted *before*.
+ // --- --- --- @@@ === === === === --- --- ---
+ // ^O^ ^-- N
+ // ...
+ // /----->>>-----\
+ // --- --- --- @@@ === === === === --- --- ---
+ // <<< <<< <<< <<<
+ // ...
+ // --- --- --- === === === === @@@ --- --- --- + // ^N^ + self[index..insert_position].rotate_left(1); + return true + } + + debug_assert!(false, "all noop conditions should have been covered above"); + false + } + + /// Forces the insertion of `s` into `self` truncating first if necessary. + /// + /// Infallible, but if the bound is zero, then it's a no-op. + pub fn force_push(&mut self, element: T) { + if Self::bound() > 0 { + self.0.truncate(Self::bound() as usize - 1); + self.0.push(element); + } + } + + /// Same as `Vec::resize`, but if `size` is more than [`Self::bound`], then [`Self::bound`] is + /// used. + pub fn bounded_resize(&mut self, size: usize, value: T) + where + T: Clone, + { + let size = size.min(Self::bound()); + self.0.resize(size, value); + } + + /// Exactly the same semantics as [`Vec::extend`], but returns an error and does nothing if the + /// length of the outcome is larger than the bound. + pub fn try_extend(&mut self, with: impl IntoIterator + ExactSizeIterator) -> Result<(), ()> { + if with.len().saturating_add(self.len()) <= Self::bound() { + self.0.extend(with); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::append`], but returns an error and does nothing if the + /// length of the outcome is larger than the bound. + pub fn try_append(&mut self, other: &mut Vec) -> Result<(), ()> { + if other.len().saturating_add(self.len()) <= Self::bound() { + self.0.append(other); + Ok(()) + } else { + Err(()) + } + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. 
+ pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), T> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(element) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. + pub fn try_push(&mut self, element: T) -> Result<(), T> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(element) + } + } +} + +impl BoundedVec { + /// Return a [`BoundedSlice`] with the content and bound of [`Self`]. 
+ pub fn as_bounded_slice(&self) -> BoundedSlice { + BoundedSlice(&self.0[..], PhantomData::default()) + } +} + +impl Default for BoundedVec { + fn default() -> Self { + // the bound cannot be below 0, which is satisfied by an empty vector + Self::unchecked_from(Vec::default()) + } +} + +impl core::fmt::Debug for BoundedVec +where + Vec: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("BoundedVec").field(&self.0).field(&Self::bound()).finish() + } +} + +impl Clone for BoundedVec +where + T: Clone, +{ + fn clone(&self) -> Self { + // bound is retained + Self::unchecked_from(self.0.clone()) + } +} + +impl> TryFrom> for BoundedVec { + type Error = Vec; + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + // explicit check just above + Ok(Self::unchecked_from(t)) + } else { + Err(t) + } + } +} + +impl> TruncateFrom> for BoundedVec { + fn truncate_from(unbound: Vec) -> Self { + BoundedVec::::truncate_from(unbound) + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. +impl AsRef> for BoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for BoundedVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsMut<[T]> for BoundedVec { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} + +// will allow for all immutable operations of `Vec` on `BoundedVec`. +impl Deref for BoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. 
+impl Index for BoundedVec +where + I: SliceIndex<[T]>, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + self.0.index(index) + } +} + +impl IndexMut for BoundedVec +where + I: SliceIndex<[T]>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl core::iter::IntoIterator for BoundedVec { + type Item = T; + type IntoIter = alloc::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a BoundedVec { + type Item = &'a T; + type IntoIter = core::slice::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a mut BoundedVec { + type Item = &'a mut T; + type IntoIter = core::slice::IterMut<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl codec::DecodeLength for BoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedVec` stored just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. 
+ as codec::DecodeLength>::len(self_encoded) + } +} + +impl PartialEq> for BoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl PartialEq> for BoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &WeakBoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for BoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedSlice<'a, T, BoundRhs>) -> bool { + self.0 == rhs.0 + } +} + +impl<'a, T: PartialEq, S: Get> PartialEq<&'a [T]> for BoundedSlice<'a, T, S> { + fn eq(&self, other: &&'a [T]) -> bool { + &self.0 == other + } +} + +impl> PartialEq> for BoundedVec { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + +impl> Eq for BoundedVec where T: Eq {} + +impl PartialOrd> for BoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl PartialOrd> for BoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &WeakBoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for BoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { + (&*self.0).partial_cmp(other.0) + } +} + +impl> Ord for BoundedVec { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl MaxEncodedLen for BoundedVec +where + T: MaxEncodedLen, + S: Get, + BoundedVec: Encode, +{ + fn max_encoded_len() -> usize { + // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 + // plus each item in the slice: + // See: https://docs.substrate.io/reference/scale-codec/ + codec::Compact(S::get()) + .encoded_size() + 
.saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) + } +} + +impl TryCollect> for I +where + I: ExactSizeIterator + Iterator, + Bound: Get, +{ + type Error = &'static str; + + fn try_collect(self) -> Result, Self::Error> { + if self.len() > Bound::get() as usize { + Err("iterator length too big") + } else { + Ok(BoundedVec::::unchecked_from(self.collect::>())) + } + } +} + +#[cfg(all(test, feature = "std"))] +mod test { + use super::*; + use crate::{bounded_vec, ConstU32}; + + #[test] + fn slice_truncate_from_works() { + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4, 5]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(bounded.deref(), &[1, 2, 3]); + } + + #[test] + fn slide_works() { + let mut b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + assert!(b.slide(1, 5)); + assert_eq!(*b, vec![0, 2, 3, 4, 1, 5]); + assert!(b.slide(4, 0)); + assert_eq!(*b, vec![1, 0, 2, 3, 4, 5]); + assert!(b.slide(0, 2)); + assert_eq!(*b, vec![0, 1, 2, 3, 4, 5]); + assert!(b.slide(1, 6)); + assert_eq!(*b, vec![0, 2, 3, 4, 5, 1]); + assert!(b.slide(0, 6)); + assert_eq!(*b, vec![2, 3, 4, 5, 1, 0]); + assert!(b.slide(5, 0)); + assert_eq!(*b, vec![0, 2, 3, 4, 5, 1]); + assert!(!b.slide(6, 0)); + assert!(!b.slide(7, 0)); + assert_eq!(*b, vec![0, 2, 3, 4, 5, 1]); + + let mut c: BoundedVec> = bounded_vec![0, 1, 2]; + assert!(!c.slide(1, 5)); + assert_eq!(*c, vec![0, 1, 2]); + assert!(!c.slide(4, 0)); + assert_eq!(*c, vec![0, 1, 2]); + assert!(!c.slide(3, 0)); + assert_eq!(*c, vec![0, 1, 2]); + assert!(c.slide(2, 0)); + assert_eq!(*c, vec![2, 0, 1]); + } + + #[test] + fn slide_noops_work() { + let mut b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + assert!(!b.slide(3, 3)); + assert_eq!(*b, vec![0, 1, 2, 3, 4, 5]); + assert!(!b.slide(3, 4)); + assert_eq!(*b, vec![0, 1, 2, 3, 4, 5]); + } 
+ + #[test] + fn force_insert_keep_left_works() { + let mut b: BoundedVec> = bounded_vec![]; + assert_eq!(b.force_insert_keep_left(1, 10), Err(())); + assert!(b.is_empty()); + + assert_eq!(b.force_insert_keep_left(0, 30), Ok(None)); + assert_eq!(b.force_insert_keep_left(0, 10), Ok(None)); + assert_eq!(b.force_insert_keep_left(1, 20), Ok(None)); + assert_eq!(b.force_insert_keep_left(3, 40), Ok(None)); + assert_eq!(*b, vec![10, 20, 30, 40]); + // at capacity. + assert_eq!(b.force_insert_keep_left(4, 41), Err(())); + assert_eq!(*b, vec![10, 20, 30, 40]); + assert_eq!(b.force_insert_keep_left(3, 31), Ok(Some(40))); + assert_eq!(*b, vec![10, 20, 30, 31]); + assert_eq!(b.force_insert_keep_left(1, 11), Ok(Some(31))); + assert_eq!(*b, vec![10, 11, 20, 30]); + assert_eq!(b.force_insert_keep_left(0, 1), Ok(Some(30))); + assert_eq!(*b, vec![1, 10, 11, 20]); + + let mut z: BoundedVec> = bounded_vec![]; + assert!(z.is_empty()); + assert_eq!(z.force_insert_keep_left(0, 10), Err(())); + assert!(z.is_empty()); + } + + #[test] + fn force_insert_keep_right_works() { + let mut b: BoundedVec> = bounded_vec![]; + assert_eq!(b.force_insert_keep_right(1, 10), Err(())); + assert!(b.is_empty()); + + assert_eq!(b.force_insert_keep_right(0, 30), Ok(None)); + assert_eq!(b.force_insert_keep_right(0, 10), Ok(None)); + assert_eq!(b.force_insert_keep_right(1, 20), Ok(None)); + assert_eq!(b.force_insert_keep_right(3, 40), Ok(None)); + assert_eq!(*b, vec![10, 20, 30, 40]); + + // at capacity. 
+ assert_eq!(b.force_insert_keep_right(0, 0), Err(())); + assert_eq!(*b, vec![10, 20, 30, 40]); + assert_eq!(b.force_insert_keep_right(1, 11), Ok(Some(10))); + assert_eq!(*b, vec![11, 20, 30, 40]); + assert_eq!(b.force_insert_keep_right(3, 31), Ok(Some(11))); + assert_eq!(*b, vec![20, 30, 31, 40]); + assert_eq!(b.force_insert_keep_right(4, 41), Ok(Some(20))); + assert_eq!(*b, vec![30, 31, 40, 41]); + + assert_eq!(b.force_insert_keep_right(5, 69), Err(())); + assert_eq!(*b, vec![30, 31, 40, 41]); + + let mut z: BoundedVec> = bounded_vec![]; + assert!(z.is_empty()); + assert_eq!(z.force_insert_keep_right(0, 10), Err(())); + assert!(z.is_empty()); + } + + #[test] + fn bound_returns_correct_value() { + assert_eq!(BoundedVec::>::bound(), 7); + } + + #[test] + fn try_insert_works() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + fn constructor_macro_works() { + // With values. Use some brackets to make sure the macro doesn't expand. + let bv: BoundedVec<(u32, u32), ConstU32<3>> = bounded_vec![(1, 2), (1, 2), (1, 2)]; + assert_eq!(bv, vec![(1, 2), (1, 2), (1, 2)]); + + // With repetition. + let bv: BoundedVec<(u32, u32), ConstU32<3>> = bounded_vec![(1, 2); 3]; + assert_eq!(bv, vec![(1, 2), (1, 2), (1, 2)]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_vec_coercion_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3]; + // these methods come from deref-ed vec. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn deref_slice_coercion_works() { + let bounded = BoundedSlice::>::try_from(&[1, 2, 3][..]).unwrap(); + // these methods come from deref-ed slice. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3, 4, 5, 6]; + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } + + #[test] + fn slice_indexing_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3, 4, 5, 6]; + assert_eq!(&bounded[0..=2], &[1, 2, 3]); + } + + #[test] + fn vec_eq_works() { + let bounded: BoundedVec> = bounded_vec![1, 2, 3, 4, 5, 6]; + assert_eq!(bounded, vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn too_big_vec_fail_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + assert_eq!( + BoundedVec::>::decode(&mut &v.encode()[..]), + Err("BoundedVec exceeds its limit".into()), + ); + } + + #[test] + fn eq_works() { + // of same type + let b1: BoundedVec> = bounded_vec![1, 2, 3]; + let b2: BoundedVec> = bounded_vec![1, 2, 3]; + assert_eq!(b1, b2); + + // of different type, but same value and bound. + crate::parameter_types! { + B1: u32 = 7; + B2: u32 = 7; + } + let b1: BoundedVec = bounded_vec![1, 2, 3]; + let b2: BoundedVec = bounded_vec![1, 2, 3]; + assert_eq!(b1, b2); + } + + #[test] + fn ord_works() { + use std::cmp::Ordering; + let b1: BoundedVec> = bounded_vec![1, 2, 3]; + let b2: BoundedVec> = bounded_vec![1, 3, 2]; + + // ordering for vec is lexicographic. 
+ assert_eq!(b1.cmp(&b2), Ordering::Less); + assert_eq!(b1.cmp(&b2), b1.into_inner().cmp(&b2.into_inner())); + } + + #[test] + fn try_extend_works() { + let mut b: BoundedVec> = bounded_vec![1, 2, 3]; + + assert!(b.try_extend(vec![4].into_iter()).is_ok()); + assert_eq!(*b, vec![1, 2, 3, 4]); + + assert!(b.try_extend(vec![5].into_iter()).is_ok()); + assert_eq!(*b, vec![1, 2, 3, 4, 5]); + + assert!(b.try_extend(vec![6].into_iter()).is_err()); + assert_eq!(*b, vec![1, 2, 3, 4, 5]); + + let mut b: BoundedVec> = bounded_vec![1, 2, 3]; + assert!(b.try_extend(vec![4, 5].into_iter()).is_ok()); + assert_eq!(*b, vec![1, 2, 3, 4, 5]); + + let mut b: BoundedVec> = bounded_vec![1, 2, 3]; + assert!(b.try_extend(vec![4, 5, 6].into_iter()).is_err()); + assert_eq!(*b, vec![1, 2, 3]); + } + + #[test] + fn test_serializer() { + let c: BoundedVec> = bounded_vec![0, 1, 2]; + assert_eq!(serde_json::json!(&c).to_string(), r#"[0,1,2]"#); + } + + #[test] + fn test_deserializer() { + let c: BoundedVec> = serde_json::from_str(r#"[0,1,2]"#).unwrap(); + + assert_eq!(c.len(), 3); + assert_eq!(c[0], 0); + assert_eq!(c[1], 1); + assert_eq!(c[2], 2); + } + + #[test] + fn test_deserializer_failed() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2,3,4,5]"#); + + match c { + Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), + _ => unreachable!("deserializer must raise error"), + } + } + + #[test] + fn bounded_vec_try_from_works() { + assert!(BoundedVec::>::try_from(vec![0]).is_ok()); + assert!(BoundedVec::>::try_from(vec![0, 1]).is_ok()); + assert!(BoundedVec::>::try_from(vec![0, 1, 2]).is_err()); + } + + #[test] + fn bounded_slice_try_from_works() { + assert!(BoundedSlice::>::try_from(&[0][..]).is_ok()); + assert!(BoundedSlice::>::try_from(&[0, 1][..]).is_ok()); + assert!(BoundedSlice::>::try_from(&[0, 1, 2][..]).is_err()); + } + + #[test] + fn can_be_collected() { + let b1: BoundedVec> = bounded_vec![1, 2, 3, 4]; + let b2: BoundedVec> = 
b1.iter().map(|x| x + 1).try_collect().unwrap(); + assert_eq!(b2, vec![2, 3, 4, 5]); + + // can also be collected into a collection of length 4. + let b2: BoundedVec> = b1.iter().map(|x| x + 1).try_collect().unwrap(); + assert_eq!(b2, vec![2, 3, 4, 5]); + + // can be mutated further into iterators that are `ExactSizedIterator`. + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().try_collect().unwrap(); + assert_eq!(b2, vec![5, 4, 3, 2]); + + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().skip(2).try_collect().unwrap(); + assert_eq!(b2, vec![3, 2]); + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().skip(2).try_collect().unwrap(); + assert_eq!(b2, vec![3, 2]); + + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().take(2).try_collect().unwrap(); + assert_eq!(b2, vec![5, 4]); + let b2: BoundedVec> = b1.iter().map(|x| x + 1).rev().take(2).try_collect().unwrap(); + assert_eq!(b2, vec![5, 4]); + + // but these won't work + let b2: Result>, _> = b1.iter().map(|x| x + 1).try_collect(); + assert!(b2.is_err()); + + let b2: Result>, _> = b1.iter().map(|x| x + 1).rev().take(2).try_collect(); + assert!(b2.is_err()); + } + + #[test] + fn bounded_vec_debug_works() { + let bound = BoundedVec::>::truncate_from(vec![1, 2, 3]); + assert_eq!(format!("{:?}", bound), "BoundedVec([1, 2, 3], 5)"); + } + + #[test] + fn bounded_slice_debug_works() { + let bound = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(format!("{:?}", bound), "BoundedSlice([1, 2, 3], 5)"); + } + + #[test] + fn bounded_vec_sort_by_key_works() { + let mut v: BoundedVec> = bounded_vec![-5, 4, 1, -3, 2]; + // Sort by absolute value. 
+ v.sort_by_key(|k| k.abs()); + assert_eq!(v, vec![1, 2, -3, 4, -5]); + } + + #[test] + fn bounded_vec_truncate_from_works() { + let unbound = vec![1, 2, 3, 4, 5]; + let bound = BoundedVec::>::truncate_from(unbound.clone()); + assert_eq!(bound, vec![1, 2, 3]); + } + + #[test] + fn bounded_slice_truncate_from_works() { + let unbound = [1, 2, 3, 4, 5]; + let bound = BoundedSlice::>::truncate_from(&unbound); + assert_eq!(bound, &[1, 2, 3][..]); + } + + #[test] + fn bounded_slice_partialeq_slice_works() { + let unbound = [1, 2, 3]; + let bound = BoundedSlice::>::truncate_from(&unbound); + + assert_eq!(bound, &unbound[..]); + assert!(bound == &unbound[..]); + } +} diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs new file mode 100644 index 000000000..63e90b0c2 --- /dev/null +++ b/bounded-collections/src/lib.rs @@ -0,0 +1,263 @@ +// Copyright 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Core collection types that have an upper limit on how many elements that they can contain. + +extern crate alloc; + +pub mod bounded_btree_map; +pub mod bounded_btree_set; +pub mod bounded_vec; +pub mod weak_bounded_vec; + +pub use bounded_btree_map::BoundedBTreeMap; +pub use bounded_btree_set::BoundedBTreeSet; +pub use bounded_vec::{BoundedSlice, BoundedVec}; +pub use weak_bounded_vec::WeakBoundedVec; + +/// A trait for querying a single value from a type defined in the trait. +/// +/// It is not required that the value is constant. +pub trait TypedGet { + /// The type which is returned. + type Type; + /// Return the current value. + fn get() -> Self::Type; +} + +/// A trait for querying a single value from a type. +/// +/// It is not required that the value is constant. +pub trait Get { + /// Return the current value. 
+ fn get() -> T; +} + +impl Get for () { + fn get() -> T { + T::default() + } +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl Get for GetDefault { + fn get() -> T { + T::default() + } +} + +macro_rules! impl_const_get { + ($name:ident, $t:ty) => { + /// Const getter for a basic type. + #[cfg_attr(feature = "std", derive(core::fmt::Debug))] + pub struct $name; + #[cfg(not(feature = "std"))] + impl core::fmt::Debug for $name { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str("") + } + } + impl Get<$t> for $name { + fn get() -> $t { + T + } + } + impl Get> for $name { + fn get() -> Option<$t> { + Some(T) + } + } + impl TypedGet for $name { + type Type = $t; + fn get() -> $t { + T + } + } + }; +} + +impl_const_get!(ConstBool, bool); +impl_const_get!(ConstU8, u8); +impl_const_get!(ConstU16, u16); +impl_const_get!(ConstU32, u32); +impl_const_get!(ConstU64, u64); +impl_const_get!(ConstU128, u128); +impl_const_get!(ConstI8, i8); +impl_const_get!(ConstI16, i16); +impl_const_get!(ConstI32, i32); +impl_const_get!(ConstI64, i64); +impl_const_get!(ConstI128, i128); + +/// Try and collect into a collection `C`. +pub trait TryCollect { + /// The error type that gets returned when a collection can't be made from `self`. + type Error; + /// Consume self and try to collect the results into `C`. + /// + /// This is useful in preventing the undesirable `.collect().try_into()` call chain on + /// collections that need to be converted into a bounded type (e.g. `BoundedVec`). + fn try_collect(self) -> Result; +} + +/// Create new implementations of the [`Get`](crate::Get) trait. +/// +/// The so-called parameter type can be created in four different ways: +/// +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. +/// +/// - Declare the parameter type without `const` to have more freedom when creating the value. 
+/// +/// NOTE: A more substantial version of this macro is available in `frame_support` crate which +/// allows mutable and persistent variants. +/// +/// # Examples +/// +/// ``` +/// # use bounded_collections::Get; +/// # use bounded_collections::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// const FIXED_VALUE: u64 = 10; +/// parameter_types! { +/// pub const Argument: u64 = 42 + FIXED_VALUE; +/// /// Visibility of the type is optional +/// OtherArgument: u64 = non_const_expression(); +/// } +/// +/// trait Config { +/// type Parameter: Get; +/// type OtherParameter: Get; +/// } +/// +/// struct Runtime; +/// impl Config for Runtime { +/// type Parameter = Argument; +/// type OtherParameter = OtherArgument; +/// } +/// ``` +/// +/// # Invalid example: +/// +/// ```compile_fail +/// # use sp_core::Get; +/// # use sp_core::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// parameter_types! { +/// pub const Argument: u64 = non_const_expression(); +/// } +/// ``` +#[macro_export] +macro_rules! parameter_types { +	( +		$( #[ $attr:meta ] )* +		$vis:vis const $name:ident: $type:ty = $value:expr; +		$( $rest:tt )* +	) => ( +		$( #[ $attr ] )* +		$vis struct $name; +		$crate::parameter_types!(@IMPL_CONST $name , $type , $value); +		$crate::parameter_types!( $( $rest )* ); +	); +	( +		$( #[ $attr:meta ] )* +		$vis:vis $name:ident: $type:ty = $value:expr; +		$( $rest:tt )* +	) => ( +		$( #[ $attr ] )* +		$vis struct $name; +		$crate::parameter_types!(@IMPL $name, $type, $value); +		$crate::parameter_types!( $( $rest )* ); +	); +	() => (); +	(@IMPL_CONST $name:ident, $type:ty, $value:expr) => { +		impl $name { +			/// Returns the value of this parameter type. 
+ pub const fn get() -> $type { + $value + } + } + + impl> $crate::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; + (@IMPL $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. + pub fn get() -> $type { + $value + } + } + + impl> $crate::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; +} + +/// Build a bounded vec from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_vec { + ($ ($values:expr),* $(,)?) => { + { + $crate::alloc::vec![$($values),*].try_into().unwrap() + } + }; + ( $value:expr ; $repetition:expr ) => { + { + $crate::alloc::vec![$value ; $repetition].try_into().unwrap() + } + } +} + +/// Build a bounded btree-map from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_btree_map { + ($ ( $key:expr => $value:expr ),* $(,)?) 
=> { + { + $crate::TryCollect::<$crate::bounded::BoundedBTreeMap<_, _, _>>::try_collect( + $crate::alloc::vec![$(($key, $value)),*].into_iter() + ).unwrap() + } + }; +} diff --git a/bounded-collections/src/weak_bounded_vec.rs b/bounded-collections/src/weak_bounded_vec.rs new file mode 100644 index 000000000..b2cbc3b8d --- /dev/null +++ b/bounded-collections/src/weak_bounded_vec.rs @@ -0,0 +1,518 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map +//! or a double map. + +use super::{BoundedSlice, BoundedVec}; +use crate::Get; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::{ +	marker::PhantomData, +	ops::{Deref, Index, IndexMut}, +	slice::SliceIndex, +}; +#[cfg(feature = "std")] +use serde::{ +	de::{Error, SeqAccess, Visitor}, +	Deserialize, Deserializer, Serialize, +}; + +/// A weakly bounded vector. +/// +/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once +/// put into storage as a raw value, map or double-map. +/// +/// The length of the vec is not strictly bounded. Decoding a vec with more elements than the bound +/// is accepted, and some methods allow bypassing the restriction with warnings. 
+#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] +pub struct WeakBoundedVec( + pub(super) Vec, + #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData, +); + +#[cfg(feature = "std")] +impl<'de, T, S: Get> Deserialize<'de> for WeakBoundedVec +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct VecVisitor>(PhantomData<(T, S)>); + + impl<'de, T, S: Get> Visitor<'de> for VecVisitor + where + T: Deserialize<'de>, + { + type Value = Vec; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + log::warn!( + target: "runtime", + "length of a bounded vector while deserializing is not respected.", + ); + } + let mut values = Vec::with_capacity(size); + + while let Some(value) = seq.next_element()? { + values.push(value); + if values.len() > max { + log::warn!( + target: "runtime", + "length of a bounded vector while deserializing is not respected.", + ); + } + } + + Ok(values) + } + } + + let visitor: VecVisitor = VecVisitor(PhantomData); + deserializer + .deserialize_seq(visitor) + .map(|v| WeakBoundedVec::::try_from(v).map_err(|_| Error::custom("out of bounds")))? + } +} + +impl> Decode for WeakBoundedVec { + fn decode(input: &mut I) -> Result { + let inner = Vec::::decode(input)?; + Ok(Self::force_from(inner, Some("decode"))) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + Vec::::skip(input) + } +} + +impl WeakBoundedVec { + /// Create `Self` from `t` without any checks. 
+ fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `WeakBoundedVec`. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Exactly the same semantics as [`Vec::remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Exactly the same semantics as [`Vec::swap_remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Exactly the same semantics as [`Vec::retain`]. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } + + /// Exactly the same semantics as [`slice::get_mut`]. + pub fn get_mut>(&mut self, index: I) -> Option<&mut >::Output> { + self.0.get_mut(index) + } +} + +impl> WeakBoundedVec { + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } + + /// Create `Self` from `t` without any checks. Logs warnings if the bound is not being + /// respected. The additional scope can be used to indicate where a potential overflow is + /// happening. + pub fn force_from(t: Vec, scope: Option<&'static str>) -> Self { + if t.len() > Self::bound() { + log::warn!( + target: "runtime", + "length of a bounded vector in scope {} is not respected.", + scope.unwrap_or("UNKNOWN"), + ); + } + + Self::unchecked_from(t) + } + + /// Consumes self and mutates self via the given `mutate` function. + /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. 
+ /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. + pub fn try_push(&mut self, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(()) + } + } +} + +impl Default for WeakBoundedVec { + fn default() -> Self { + // the bound cannot be below 0, which is satisfied by an empty vector + Self::unchecked_from(Vec::default()) + } +} + +impl core::fmt::Debug for WeakBoundedVec +where + Vec: core::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_tuple("WeakBoundedVec").field(&self.0).field(&Self::bound()).finish() + } +} + +impl Clone for WeakBoundedVec +where + T: Clone, +{ + fn clone(&self) -> Self { + // bound is retained + Self::unchecked_from(self.0.clone()) + } +} + +impl> TryFrom> for WeakBoundedVec { + type Error = (); + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + // explicit check just above + Ok(Self::unchecked_from(t)) + } else { + Err(()) + } + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. 
+impl AsRef> for WeakBoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for WeakBoundedVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsMut<[T]> for WeakBoundedVec { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} + +// will allow for all immutable operations of `Vec` on `WeakBoundedVec`. +impl Deref for WeakBoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. +impl Index for WeakBoundedVec +where + I: SliceIndex<[T]>, +{ + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + self.0.index(index) + } +} + +impl IndexMut for WeakBoundedVec +where + I: SliceIndex<[T]>, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + self.0.index_mut(index) + } +} + +impl core::iter::IntoIterator for WeakBoundedVec { + type Item = T; + type IntoIter = alloc::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a WeakBoundedVec { + type Item = &'a T; + type IntoIter = core::slice::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a, T, S> core::iter::IntoIterator for &'a mut WeakBoundedVec { + type Item = &'a mut T; + type IntoIter = core::slice::IterMut<'a, T>; + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl codec::DecodeLength for WeakBoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `WeakBoundedVec` stores just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. 
+ as codec::DecodeLength>::len(self_encoded) + } +} + +impl PartialEq> for WeakBoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &WeakBoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl PartialEq> for WeakBoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedVec) -> bool { + self.0 == rhs.0 + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialEq> for WeakBoundedVec +where + T: PartialEq, + BoundSelf: Get, + BoundRhs: Get, +{ + fn eq(&self, rhs: &BoundedSlice<'a, T, BoundRhs>) -> bool { + self.0 == rhs.0 + } +} + +impl> PartialEq> for WeakBoundedVec { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + +impl> Eq for WeakBoundedVec where T: Eq {} + +impl PartialOrd> for WeakBoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &WeakBoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl PartialOrd> for WeakBoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedVec) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl<'a, T, BoundSelf, BoundRhs> PartialOrd> for WeakBoundedVec +where + T: PartialOrd, + BoundSelf: Get, + BoundRhs: Get, +{ + fn partial_cmp(&self, other: &BoundedSlice<'a, T, BoundRhs>) -> Option { + (&*self.0).partial_cmp(other.0) + } +} + +impl> Ord for WeakBoundedVec { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl MaxEncodedLen for WeakBoundedVec +where + T: MaxEncodedLen, + S: Get, + WeakBoundedVec: Encode, +{ + fn max_encoded_len() -> usize { + // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 + // plus each item in the slice: + // See: https://docs.substrate.io/reference/scale-codec/ + codec::Compact(S::get()) + .encoded_size() + .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) + } +} + +#[cfg(test)] +mod test { + use super::*; + 
use crate::ConstU32; + + #[test] + fn bound_returns_correct_value() { + assert_eq!(WeakBoundedVec::>::bound(), 7); + } + + #[test] + fn try_insert_works() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_coercion_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + // these methods come from deref-ed vec. 
+ assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } + + #[test] + fn slice_indexing_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(&bounded[0..=2], &[1, 2, 3]); + } + + #[test] + fn vec_eq_works() { + let bounded: WeakBoundedVec> = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + assert_eq!(bounded, vec![1, 2, 3, 4, 5, 6]); + } + + #[test] + fn too_big_succeed_to_decode() { + let v: Vec = vec![1, 2, 3, 4, 5]; + let w = WeakBoundedVec::>::decode(&mut &v.encode()[..]).unwrap(); + assert_eq!(v, *w); + } +} From 528e5e7dd72036af5996a7e41fc619c19ed5585a Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 26 Jan 2023 15:40:17 +0900 Subject: [PATCH 299/359] Write better description for bounded-collections (#709) --- bounded-collections/Cargo.toml | 2 +- bounded-collections/src/lib.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index bcedae9eb..e3a23f67a 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" -description = "Bounded types and their supporting traits used in Substrate" +description = "Bounded types and their supporting traits" edition = "2021" rust-version = "1.60.0" diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index 63e90b0c2..b5f02c7c7 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -6,7 +6,8 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -//! Core collection types that have an upper limit on how many elements that they can contain. +//! Collection types that have an upper limit on how many elements that they can contain, and +//! supporting traits that aid in defining the limit. extern crate alloc; From 5ba1db5408f6a92b3293ca7b62d99724bcc470c5 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 26 Jan 2023 16:24:27 +0900 Subject: [PATCH 300/359] Remove a reference to `sp_core` in the comments (#710) --- bounded-collections/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index b5f02c7c7..8eb24f491 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -148,8 +148,8 @@ pub trait TryCollect { /// # Invalid example: /// /// ```compile_fail -/// # use sp_core::Get; -/// # use sp_core::parameter_types; +/// # use bounded_collections::Get; +/// # use bounded_collections::parameter_types; /// // This function cannot be used in a const context. 
/// fn non_const_expression() -> u64 { 99 } /// From 0e53fd9ed8cd5c811068bdef5d9bc93eb6cd59cf Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 26 Jan 2023 16:40:14 +0900 Subject: [PATCH 301/359] Make alloc public and bump version (#711) --- bounded-collections/Cargo.toml | 2 +- bounded-collections/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index e3a23f67a..59514c254 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.0" +version = "0.1.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index 8eb24f491..8694ebcfb 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -9,7 +9,7 @@ //! Collection types that have an upper limit on how many elements that they can contain, and //! supporting traits that aid in defining the limit. 
-extern crate alloc; +pub extern crate alloc; pub mod bounded_btree_map; pub mod bounded_btree_set; From b9123adf0ab70a6d80d9fd93d491ac2221d3e217 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Fri, 27 Jan 2023 13:25:32 +0900 Subject: [PATCH 302/359] Ensure bounded-collections compiles under no_std (#712) * Ensure bounded-collections compiles under no_std * Add README.md * Fixes * Add CHANGELOG.md to bounded-collections * Add CI check for bounded-collections --- .github/workflows/ci.yml | 12 ++++++++++++ bounded-collections/CHANGELOG.md | 16 ++++++++++++++++ bounded-collections/Cargo.toml | 12 +++++++++--- bounded-collections/README.md | 3 +++ bounded-collections/src/bounded_btree_map.rs | 7 ++++--- bounded-collections/src/bounded_btree_set.rs | 5 +++-- bounded-collections/src/bounded_vec.rs | 1 + bounded-collections/src/lib.rs | 2 ++ bounded-collections/src/weak_bounded_vec.rs | 2 ++ 9 files changed, 52 insertions(+), 8 deletions(-) create mode 100644 bounded-collections/CHANGELOG.md create mode 100644 bounded-collections/README.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1fa9c2746..1ffe117b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -98,6 +98,18 @@ jobs: command: test args: -p ethbloom --all-features + - name: Test bounded-collections no_std + uses: actions-rs/cargo@v1 + with: + command: test + args: -p bounded-collections --no-default-features + + - name: Test bounded-collections all-features + uses: actions-rs/cargo@v1 + with: + command: test + args: -p bounded-collections --all-features + - name: Test uint on bigendian if: runner.os == 'Linux' uses: actions-rs/cargo@v1 diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md new file mode 100644 index 000000000..94de1222e --- /dev/null +++ b/bounded-collections/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## [0.1.2] - 2023-01-27 +- Ensured `bounded-collections` crate compiles under `no_std`. [#712](https://github.com/paritytech/parity-common/pull/712) + +## [0.1.1] - 2023-01-26 +- Made `alloc` public. [#711](https://github.com/paritytech/parity-common/pull/711) +- Removed a reference to `sp_core` in the comments. [#710](https://github.com/paritytech/parity-common/pull/710) + +## [0.1.0] - 2023-01-26 +- Wrote better description for `bounded-collections`. [#709](https://github.com/paritytech/parity-common/pull/709) +- Added `bounded-collections` crate. [#708](https://github.com/paritytech/parity-common/pull/708) \ No newline at end of file diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 59514c254..9c3b4fc4e 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.60.0" [dependencies] -serde = { version = "1.0.101", default-features = false } +serde = { version = "1.0.101", default-features = false, optional = true } codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } log = { version = "0.4.17", default-features = false } @@ -19,4 +19,10 @@ serde_json = "1.0.41" [features] default = ["std"] -std = ["serde/std", "serde/derive"] +std = [ + "log/std", + "codec/std", + "scale-info/std", + "serde", + "serde/derive", +] diff --git a/bounded-collections/README.md b/bounded-collections/README.md new file mode 100644 index 000000000..b7cda37f9 --- /dev/null +++ b/bounded-collections/README.md @@ -0,0 +1,3 @@ +# Bounded Collections + +Bounded 
types and their supporting traits. \ No newline at end of file diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index 3d064fa73..e298bc60e 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ -393,12 +393,13 @@ where mod test { use super::*; use crate::ConstU32; + use alloc::{vec, vec::Vec}; fn map_from_keys(keys: &[K]) -> BTreeMap where K: Ord + Copy, { - keys.iter().copied().zip(std::iter::repeat(())).collect() + keys.iter().copied().zip(core::iter::repeat(())).collect() } fn boundedmap_from_keys(keys: &[K]) -> BoundedBTreeMap @@ -473,13 +474,13 @@ mod test { impl Eq for Unequal {} impl Ord for Unequal { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { self.0.cmp(&other.0) } } impl PartialOrd for Unequal { - fn partial_cmp(&self, other: &Self) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index f324316c8..a4876fcd3 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -323,6 +323,7 @@ where mod test { use super::*; use crate::ConstU32; + use alloc::{vec, vec::Vec}; fn set_from_keys(keys: &[T]) -> BTreeSet where @@ -403,13 +404,13 @@ mod test { impl Eq for Unequal {} impl Ord for Unequal { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { self.0.cmp(&other.0) } } impl PartialOrd for Unequal { - fn partial_cmp(&self, other: &Self) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 788008b9a..5122ed3f4 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ 
-20,6 +20,7 @@ use super::WeakBoundedVec; use crate::{Get, TryCollect}; +use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ marker::PhantomData, diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index 8694ebcfb..77b703dd5 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -9,6 +9,8 @@ //! Collection types that have an upper limit on how many elements that they can contain, and //! supporting traits that aid in defining the limit. +#![cfg_attr(not(feature = "std"), no_std)] + pub extern crate alloc; pub mod bounded_btree_map; diff --git a/bounded-collections/src/weak_bounded_vec.rs b/bounded-collections/src/weak_bounded_vec.rs index b2cbc3b8d..cb711d760 100644 --- a/bounded-collections/src/weak_bounded_vec.rs +++ b/bounded-collections/src/weak_bounded_vec.rs @@ -20,6 +20,7 @@ use super::{BoundedSlice, BoundedVec}; use crate::Get; +use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use core::{ marker::PhantomData, @@ -448,6 +449,7 @@ where mod test { use super::*; use crate::ConstU32; + use alloc::vec; #[test] fn bound_returns_correct_value() { From 854e68bb5af1bf285445228c47d1a1090e1f1bf3 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Fri, 27 Jan 2023 17:33:04 +0900 Subject: [PATCH 303/359] Remove non-existent bounded mod reference (#715) * Remove non-existent bounded mod reference * Bump version --- bounded-collections/CHANGELOG.md | 3 +++ bounded-collections/Cargo.toml | 2 +- bounded-collections/src/lib.rs | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 94de1222e..27184ff6e 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.3] - 2023-01-27 +- Removed non-existent `bounded` mod reference. 
[#715](https://github.com/paritytech/parity-common/pull/715) + ## [0.1.2] - 2023-01-27 - Ensured `bounded-collections` crate compiles under `no_std`. [#712](https://github.com/paritytech/parity-common/pull/712) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 9c3b4fc4e..7f40fe474 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.2" +version = "0.1.3" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index 77b703dd5..401842c96 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -258,7 +258,7 @@ macro_rules! bounded_vec { macro_rules! bounded_btree_map { ($ ( $key:expr => $value:expr ),* $(,)?) => { { - $crate::TryCollect::<$crate::bounded::BoundedBTreeMap<_, _, _>>::try_collect( + $crate::TryCollect::<$crate::BoundedBTreeMap<_, _, _>>::try_collect( $crate::alloc::vec![$(($key, $value)),*].into_iter() ).unwrap() } From c2da3b9e767a240cbc53123085717f22f8ec964e Mon Sep 17 00:00:00 2001 From: Squirrel Date: Wed, 1 Feb 2023 02:32:48 +0000 Subject: [PATCH 304/359] Add checked num traits (#716) * Add support for checked num traits * import traits --- primitive-types/Cargo.toml | 3 +++ primitive-types/impls/num-traits/src/lib.rs | 28 +++++++++++++++++++++ primitive-types/tests/num_traits.rs | 20 +++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 06c1901ce..7aa3d2e9c 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -17,6 +17,9 @@ impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-featur impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } scale-info-crate = { package = "scale-info", version = ">=0.9, 
<3", features = ["derive"], default-features = false, optional = true } +[dev-dependencies] +num-traits = "0.2" + [features] default = ["std"] std = ["uint/std", "fixed-hash/std", "impl-codec?/std"] diff --git a/primitive-types/impls/num-traits/src/lib.rs b/primitive-types/impls/num-traits/src/lib.rs index 5fa9a76d4..5c0973eaa 100644 --- a/primitive-types/impls/num-traits/src/lib.rs +++ b/primitive-types/impls/num-traits/src/lib.rs @@ -57,5 +57,33 @@ macro_rules! impl_uint_num_traits { Some(self.integer_sqrt()) } } + + impl $crate::num_traits::ops::checked::CheckedAdd for $name { + #[inline] + fn checked_add(&self, v: &Self) -> Option { + $name::checked_add(*self, *v) + } + } + + impl $crate::num_traits::ops::checked::CheckedSub for $name { + #[inline] + fn checked_sub(&self, v: &Self) -> Option { + $name::checked_sub(*self, *v) + } + } + + impl $crate::num_traits::ops::checked::CheckedDiv for $name { + #[inline] + fn checked_div(&self, v: &Self) -> Option { + $name::checked_div(*self, *v) + } + } + + impl $crate::num_traits::ops::checked::CheckedMul for $name { + #[inline] + fn checked_mul(&self, v: &Self) -> Option { + $name::checked_mul(*self, *v) + } + } }; } diff --git a/primitive-types/tests/num_traits.rs b/primitive-types/tests/num_traits.rs index 1d6c8e8a5..9bb26d26a 100644 --- a/primitive-types/tests/num_traits.rs +++ b/primitive-types/tests/num_traits.rs @@ -7,6 +7,7 @@ // except according to those terms. 
use impl_num_traits::integer_sqrt::IntegerSquareRoot; +use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedSub}; use primitive_types::U256; #[test] @@ -15,3 +16,22 @@ fn u256_isqrt() { let s = x.integer_sqrt_checked().unwrap(); assert_eq!(x.integer_sqrt(), s); } + +#[test] +fn u256_checked_traits_supported() { + const ZERO: &U256 = &U256::zero(); + const ONE: &U256 = &U256::one(); + const MAX: &U256 = &U256::MAX; + + assert_eq!(::checked_add(MAX, ONE), None); + assert_eq!(::checked_add(ZERO, ONE), Some(*ONE)); + + assert_eq!(::checked_sub(ZERO, ONE), None); + assert_eq!(::checked_sub(ONE, ZERO), Some(*ONE)); + + assert_eq!(::checked_div(MAX, ZERO), None); + assert_eq!(::checked_div(MAX, ONE), Some(*MAX)); + + assert_eq!(::checked_mul(MAX, MAX), None); + assert_eq!(::checked_mul(MAX, ZERO), Some(*ZERO)); +} From 07127b6afab1743ad127baf3f17664d33c14bad0 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 1 Feb 2023 09:07:51 +0100 Subject: [PATCH 305/359] bounded_vec: Avoid unnecessary allocation when decoding (#713) * bounded_vec: Avoid unnecessary allocation when decoding * Fix unnecessary alloc for other types; add `Hash` derive * bounded-collections: Update version to 0.1.4 * Cargo fmt * bounded-collections: add some missing encoding tests --- bounded-collections/CHANGELOG.md | 6 +++- bounded-collections/Cargo.toml | 4 +-- bounded-collections/src/bounded_btree_map.rs | 31 ++++++++++++++++++-- bounded-collections/src/bounded_btree_set.rs | 31 ++++++++++++++++++-- bounded-collections/src/bounded_vec.rs | 31 +++++++++++++++++--- primitive-types/impls/codec/Cargo.toml | 2 +- 6 files changed, 91 insertions(+), 14 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 27184ff6e..a30fa96a8 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,10 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.4] - 2023-01-28 +- Fixed unnecessary decoding and allocations for bounded types, when the decoded length is greater than the allowed bound. +- Add `Hash` derivation (when `feature = "std"`) for bounded types. + ## [0.1.3] - 2023-01-27 - Removed non-existent `bounded` mod reference. [#715](https://github.com/paritytech/parity-common/pull/715) @@ -16,4 +20,4 @@ The format is based on [Keep a Changelog]. ## [0.1.0] - 2023-01-26 - Wrote better description for `bounded-collections`. [#709](https://github.com/paritytech/parity-common/pull/709) -- Added `bounded-collections` crate. [#708](https://github.com/paritytech/parity-common/pull/708) \ No newline at end of file +- Added `bounded-collections` crate. [#708](https://github.com/paritytech/parity-common/pull/708) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 7f40fe474..ffd739673 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.3" +version = "0.1.4" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -10,7 +10,7 @@ rust-version = "1.60.0" [dependencies] serde = { version = "1.0.101", default-features = false, optional = true } -codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } +codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } log = { version = "0.4.17", default-features = false } diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index e298bc60e..f306fe956 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ 
-19,7 +19,7 @@ use crate::{Get, TryCollect}; use alloc::collections::BTreeMap; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// A bounded map based on a B-Tree. @@ -29,6 +29,7 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the /// map. All internal operations ensure this bound is respected. +#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedBTreeMap(BTreeMap, PhantomData); @@ -40,10 +41,15 @@ where S: Get, { fn decode(input: &mut I) -> Result { - let inner = BTreeMap::::decode(input)?; - if inner.len() > S::get() as usize { + // Same as the underlying implementation for `Decode` on `BTreeMap`, except we fail early if + // the len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { return Err("BoundedBTreeMap exceeds its limit".into()) } + input.descend_ref()?; + let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; + input.ascend_ref(); Ok(Self(inner, PhantomData)) } @@ -394,6 +400,7 @@ mod test { use super::*; use crate::ConstU32; use alloc::{vec, vec::Vec}; + use codec::CompactLen; fn map_from_keys(keys: &[K]) -> BTreeMap where @@ -410,6 +417,14 @@ mod test { map_from_keys(keys).try_into().unwrap() } + #[test] + fn encoding_same_as_unbounded_map() { + let b = boundedmap_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let m = map_from_keys(&[1, 2, 3, 4, 5, 6]); + + assert_eq!(b.encode(), m.encode()); + } + #[test] fn try_insert_works() { let mut bounded = boundedmap_from_keys::>(&[1, 2, 3]); @@ -460,6 +475,16 @@ mod test { ); } + #[test] + fn dont_consume_more_data_than_bounded_len() { + let m = map_from_keys(&[1, 2, 3, 4, 5, 6]); + let data = m.encode(); + let data_input = &mut &data[..]; + + 
BoundedBTreeMap::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + #[test] fn unequal_eq_impl_insert_works() { // given a struct with a strange notion of equality diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index a4876fcd3..654ae1b3b 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -19,7 +19,7 @@ use crate::{Get, TryCollect}; use alloc::collections::BTreeSet; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Compact, Decode, Encode, MaxEncodedLen}; use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// A bounded set based on a B-Tree. @@ -29,6 +29,7 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the /// set. All internal operations ensure this bound is respected. +#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedBTreeSet(BTreeSet, PhantomData); @@ -39,10 +40,15 @@ where S: Get, { fn decode(input: &mut I) -> Result { - let inner = BTreeSet::::decode(input)?; - if inner.len() > S::get() as usize { + // Same as the underlying implementation for `Decode` on `BTreeSet`, except we fail early if + // the len is too big. 
+ let len: u32 = >::decode(input)?.into(); + if len > S::get() { return Err("BoundedBTreeSet exceeds its limit".into()) } + input.descend_ref()?; + let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; + input.ascend_ref(); Ok(Self(inner, PhantomData)) } @@ -324,6 +330,7 @@ mod test { use super::*; use crate::ConstU32; use alloc::{vec, vec::Vec}; + use codec::CompactLen; fn set_from_keys(keys: &[T]) -> BTreeSet where @@ -340,6 +347,14 @@ mod test { set_from_keys(keys).try_into().unwrap() } + #[test] + fn encoding_same_as_unbounded_set() { + let b = boundedset_from_keys::>(&[1, 2, 3, 4, 5, 6]); + let m = set_from_keys(&[1, 2, 3, 4, 5, 6]); + + assert_eq!(b.encode(), m.encode()); + } + #[test] fn try_insert_works() { let mut bounded = boundedset_from_keys::>(&[1, 2, 3]); @@ -390,6 +405,16 @@ mod test { ); } + #[test] + fn dont_consume_more_data_than_bounded_len() { + let s = set_from_keys(&[1, 2, 3, 4, 5, 6]); + let data = s.encode(); + let data_input = &mut &data[..]; + + BoundedBTreeSet::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + #[test] fn unequal_eq_impl_insert_works() { // given a struct with a strange notion of equality diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 5122ed3f4..4bbe389f3 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -21,7 +21,7 @@ use super::WeakBoundedVec; use crate::{Get, TryCollect}; use alloc::{boxed::Box, vec::Vec}; -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use codec::{decode_vec_with_len, Compact, Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ marker::PhantomData, ops::{Deref, Index, IndexMut, RangeBounds}, @@ -40,7 +40,7 @@ use serde::{ /// /// As the name suggests, the length of the queue is always bounded. All internal operations ensure /// this bound is respected. 
-#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] +#[cfg_attr(feature = "std", derive(Hash, Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData); @@ -108,6 +108,7 @@ where /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. +#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode)] pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); @@ -290,10 +291,13 @@ impl<'a, T, S: Get> BoundedSlice<'a, T, S> { impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { - let inner = Vec::::decode(input)?; - if inner.len() > S::get() as usize { + // Same as the underlying implementation for `Decode` on `Vec`, except we fail early if the + // len is too big. + let len: u32 = >::decode(input)?.into(); + if len > S::get() { return Err("BoundedVec exceeds its limit".into()) } + let inner = decode_vec_with_len(input, len as usize)?; Ok(Self(inner, PhantomData)) } @@ -904,6 +908,15 @@ where mod test { use super::*; use crate::{bounded_vec, ConstU32}; + use codec::CompactLen; + + #[test] + fn encoding_same_as_unbounded_vec() { + let b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; + let v: Vec = vec![0, 1, 2, 3, 4, 5]; + + assert_eq!(b.encode(), v.encode()); + } #[test] fn slice_truncate_from_works() { @@ -1101,6 +1114,16 @@ mod test { ); } + #[test] + fn dont_consume_more_data_than_bounded_len() { + let v: Vec = vec![1, 2, 3, 4, 5]; + let data = v.encode(); + let data_input = &mut &data[..]; + + BoundedVec::>::decode(data_input).unwrap_err(); + assert_eq!(data_input.len(), data.len() - Compact::::compact_len(&(data.len() as u32))); + } + #[test] fn eq_works() { // of same type diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 50c5d2180..5f41774c1 100644 --- 
a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["max-encoded-len"] } +parity-scale-codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"] } [features] default = ["std"] From 0eb011b331e4c85c03a374e1a658bd5bcf6236a8 Mon Sep 17 00:00:00 2001 From: ordian Date: Wed, 1 Feb 2023 16:22:07 -0300 Subject: [PATCH 306/359] release impl-num-traits 0.1.2 (#718) --- primitive-types/impls/num-traits/CHANGELOG.md | 3 +++ primitive-types/impls/num-traits/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md index 0cec2af8f..d33482beb 100644 --- a/primitive-types/impls/num-traits/CHANGELOG.md +++ b/primitive-types/impls/num-traits/CHANGELOG.md @@ -5,6 +5,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] + +## [0.1.2] - 2023-02-01 +- Added `checked_*` trait impls. [#716](https://github.com/paritytech/parity-common/pull/716) - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.1.1] - 2021-06-30 diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index 4209de17b..765ed92e6 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-num-traits" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" From 223af1dc6c176e35698aed9285f44e428da0050e Mon Sep 17 00:00:00 2001 From: Jens W <8270201+DragonDev1906@users.noreply.github.com> Date: Wed, 1 Feb 2023 20:40:35 +0100 Subject: [PATCH 307/359] Uint: Fix clippy warnings (#707) - In the uint crate - When using the uint crate --- uint/src/uint.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 2366543f4..2914f7223 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -128,7 +128,7 @@ impl From for FromStrRadixErr { } /// Conversion from decimal string error -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum FromDecStrErr { /// Char not from range 0-9 InvalidCharacter, @@ -1001,13 +1001,13 @@ macro_rules! construct_uint { while n > u_one { if is_even(&n) { x = x * x; - n = n >> 1usize; + n >>= 1usize; } else { y = x * y; x = x * x; // to reduce odd number by 1 we should just clear the last bit - n.0[$n_words-1] = n.0[$n_words-1] & ((!0u64)>>1); - n = n >> 1usize; + n.0[$n_words-1] &= (!0u64)>>1; + n >>= 1usize; } } x * y @@ -1028,7 +1028,7 @@ macro_rules! construct_uint { while n > u_one { if is_even(&n) { x = $crate::overflowing!(x.overflowing_mul(x), overflow); - n = n >> 1usize; + n >>= 1usize; } else { y = $crate::overflowing!(x.overflowing_mul(y), overflow); x = $crate::overflowing!(x.overflowing_mul(x), overflow); @@ -1678,7 +1678,7 @@ macro_rules! 
construct_uint { loop { let digit = (current % ten).low_u64() as u8; buf[i] = digit + b'0'; - current = current / ten; + current /= ten; if current.is_zero() { break; } From 2fb72eea96b6de4a085144ce239feb49da0cd39e Mon Sep 17 00:00:00 2001 From: Marcin S Date: Thu, 16 Feb 2023 12:39:58 +0100 Subject: [PATCH 308/359] bounded-collections: Fixes `Hash` impl (#721) --- bounded-collections/CHANGELOG.md | 3 ++ bounded-collections/Cargo.toml | 2 +- bounded-collections/src/bounded_btree_map.rs | 22 +++++++++++- bounded-collections/src/bounded_btree_set.rs | 22 +++++++++++- bounded-collections/src/bounded_vec.rs | 36 ++++++++++++++++++-- 5 files changed, 79 insertions(+), 6 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index a30fa96a8..8198520bf 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.5] - 2023-02-13 +- Fixed `Hash` impl (previously it could not be used in practice, because the size bound was required to also implement `Hash`). + ## [0.1.4] - 2023-01-28 - Fixed unnecessary decoding and allocations for bounded types, when the decoded length is greater than the allowed bound. - Add `Hash` derivation (when `feature = "std"`) for bounded types. 
diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index ffd739673..e5ae07808 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.4" +version = "0.1.5" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index f306fe956..de975801a 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ -29,7 +29,6 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the /// map. All internal operations ensure this bound is respected. -#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedBTreeMap(BTreeMap, PhantomData); @@ -237,6 +236,15 @@ where } } +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. +#[cfg(feature = "std")] +impl std::hash::Hash for BoundedBTreeMap { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + impl PartialEq> for BoundedBTreeMap where BTreeMap: PartialEq, @@ -641,4 +649,16 @@ mod test { assert_eq!(Ok(b2), b1.try_map(|(_, v)| (v as u16).checked_mul(100_u16).ok_or("overflow"))); } + + // Just a test that structs containing `BoundedBTreeMap` can derive `Hash`. (This was broken + // when it was deriving `Hash`). 
+ #[test] + #[cfg(feature = "std")] + fn container_can_derive_hash() { + #[derive(Hash)] + struct Foo { + bar: u8, + map: BoundedBTreeMap>, + } + } } diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index 654ae1b3b..3322cb720 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -29,7 +29,6 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the /// set. All internal operations ensure this bound is respected. -#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedBTreeSet(BTreeSet, PhantomData); @@ -176,6 +175,15 @@ where } } +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. +#[cfg(feature = "std")] +impl std::hash::Hash for BoundedBTreeSet { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + impl PartialEq> for BoundedBTreeSet where BTreeSet: PartialEq, @@ -502,4 +510,16 @@ mod test { let b2: Result>, _> = b1.iter().map(|k| k + 1).skip(2).try_collect(); assert!(b2.is_err()); } + + // Just a test that structs containing `BoundedBTreeSet` can derive `Hash`. (This was broken + // when it was deriving `Hash`). + #[test] + #[cfg(feature = "std")] + fn container_can_derive_hash() { + #[derive(Hash)] + struct Foo { + bar: u8, + set: BoundedBTreeSet>, + } + } } diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 4bbe389f3..33a192fec 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -40,7 +40,7 @@ use serde::{ /// /// As the name suggests, the length of the queue is always bounded. All internal operations ensure /// this bound is respected. 
-#[cfg_attr(feature = "std", derive(Hash, Serialize), serde(transparent))] +#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData); @@ -108,7 +108,6 @@ where /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. -#[cfg_attr(feature = "std", derive(Hash))] #[derive(Encode)] pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); @@ -273,6 +272,15 @@ impl<'a, T, S> Deref for BoundedSlice<'a, T, S> { } } +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. +#[cfg(feature = "std")] +impl<'a, T: std::hash::Hash, S> std::hash::Hash for BoundedSlice<'a, T, S> { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + impl<'a, T, S> core::iter::IntoIterator for BoundedSlice<'a, T, S> { type Item = &'a T; type IntoIter = core::slice::Iter<'a, T>; @@ -703,6 +711,15 @@ impl> TruncateFrom> for BoundedVec { } } +// Custom implementation of `Hash` since deriving it would require all generic bounds to also +// implement it. +#[cfg(feature = "std")] +impl std::hash::Hash for BoundedVec { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + // It is okay to give a non-mutable reference of the inner vec to anyone. impl AsRef> for BoundedVec { fn as_ref(&self) -> &Vec { @@ -907,7 +924,7 @@ where #[cfg(all(test, feature = "std"))] mod test { use super::*; - use crate::{bounded_vec, ConstU32}; + use crate::{bounded_vec, ConstU32, ConstU8}; use codec::CompactLen; #[test] @@ -1289,4 +1306,17 @@ mod test { assert_eq!(bound, &unbound[..]); assert!(bound == &unbound[..]); } + + // Just a test that structs containing `BoundedVec` and `BoundedSlice` can derive `Hash`. (This was broken when + // they were deriving `Hash`). 
+ #[test] + #[cfg(feature = "std")] + fn container_can_derive_hash() { + #[derive(Hash)] + struct Foo<'a> { + bar: u8, + slice: BoundedSlice<'a, usize, ConstU8<8>>, + map: BoundedVec>, + } + } } From 108da26e07a7fcb9332f5bd5ca4f44c09694b0d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 16:15:02 +0100 Subject: [PATCH 309/359] build(deps): update sysinfo requirement from 0.27.5 to 0.28.1 (#724) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. - [Release notes](https://github.com/GuillaumeGomez/sysinfo/releases) - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 0e7839fd3..2ca9f7ff9 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -39,6 +39,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.27.5" +sysinfo = "0.28.1" ctrlc = "3.1.4" chrono = "0.4" From 27b17c0d5d32ebf384dd4b535d9045598904519d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 16:15:16 +0100 Subject: [PATCH 310/359] build(deps): bump Swatinem/rust-cache from 2.2.0 to 2.2.1 (#725) Bumps [Swatinem/rust-cache](https://github.com/Swatinem/rust-cache) from 2.2.0 to 2.2.1. 
- [Release notes](https://github.com/Swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/Swatinem/rust-cache/compare/359a70e43a0bb8a13953b04a90f76428b4959bb6...6fd3edff6979b79f87531400ad694fb7f2c84b1f) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ffe117b6..f6c95932d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 + uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 + uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 - run: rustup target add wasm32-unknown-unknown @@ -130,7 +130,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@359a70e43a0bb8a13953b04a90f76428b4959bb6 # v2.2.0 + uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 - uses: actions-rs/cargo@v1 with: From 2fa111d0b3e4ecf03bb2cd4de6aaac23b0269280 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Apr 2023 16:07:50 +0200 Subject: [PATCH 311/359] build(deps): update hex-literal requirement from 0.3.1 to 0.4.1 (#739) Updates the requirements on [hex-literal](https://github.com/RustCrypto/utils) to permit the latest version. 
- [Release notes](https://github.com/RustCrypto/utils/releases) - [Commits](https://github.com/RustCrypto/utils/compare/hex-literal-v0.3.1...hex-literal-v0.4.1) --- updated-dependencies: - dependency-name: hex-literal dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ethbloom/Cargo.toml | 2 +- rlp/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 0b9044b45..56c0ef747 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -22,7 +22,7 @@ scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = [dev-dependencies] criterion = "0.4.0" rand = "0.8.0" -hex-literal = "0.3.1" +hex-literal = "0.4.1" [features] default = ["std", "rlp", "serialize", "rustc-hex"] diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index c61433ecd..a9079437c 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -15,7 +15,7 @@ rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] criterion = "0.4.0" -hex-literal = "0.3.1" +hex-literal = "0.4.1" primitive-types = { path = "../primitive-types", version = "0.12", features = ["impl-rlp"] } [features] From dbfbbde208f2936f04f8e058a2105fc8a01597ed Mon Sep 17 00:00:00 2001 From: ordian Date: Fri, 21 Apr 2023 16:47:06 +0200 Subject: [PATCH 312/359] upgrade `rocksdb` to 0.20.1 (#743) * kvdb-rocksdb: update to 0.20.1 and fix test * update changelog * bump version --- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 6 +++--- kvdb-rocksdb/src/lib.rs | 7 ++++--- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 32bbd3810..1a5d2e775 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.18.0] - 2023-04-21 +- Updated `rocksdb` to 0.20.1. 
[#743](https://github.com/paritytech/parity-common/pull/743) + ## [0.17.0] - 2022-11-29 - Removed `parity-util-mem` support. [#696](https://github.com/paritytech/parity-common/pull/696) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 2ca9f7ff9..a1c8d41b0 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.17.0" +version = "0.18.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -24,12 +24,12 @@ regex = "1.3.1" [target.'cfg(any(target_os = "openbsd", target_env = "msvc"))'.dependencies.rocksdb] default-features = false features = ["snappy"] -version = "0.19.0" +version = "0.20.1" [target.'cfg(not(any(target_os = "openbsd", target_env = "msvc")))'.dependencies.rocksdb] default-features = false features = ["snappy", "jemalloc"] -version = "0.19.0" +version = "0.20.1" [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 5c231413d..aa55d7445 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -885,12 +885,13 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 .tempdir() .expect("the OS can create tmp dirs"); let db = Database::open(&cfg, db_path.path()).expect("can open a db"); - let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) - .expect("rocksdb creates a LOG file"); - let mut settings = String::new(); let statistics = db.get_statistics(); assert!(statistics.contains_key("block.cache.hit")); + drop(db); + let mut rocksdb_log = std::fs::File::open(format!("{}/LOG", db_path.path().to_str().unwrap())) + .expect("rocksdb creates a LOG file"); + let mut settings = String::new(); rocksdb_log.read_to_string(&mut settings).unwrap(); // Check column count assert!(settings.contains("Options for column family [default]"), "no 
default col"); From 910097e9e8c1c8aeb86a4da5e5b84ed890f0a2e0 Mon Sep 17 00:00:00 2001 From: Florian Hartwig Date: Sat, 22 Apr 2023 20:59:55 +0200 Subject: [PATCH 313/359] fixed-hash: Derive PartialEq and Eq instead of implementing them manually (#742) * derive PartialEq and Eq instead of implementing them manually * add unit test * cargo fmt --- fixed-hash/src/hash.rs | 10 +--------- fixed-hash/src/tests.rs | 9 +++++++++ 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 9dc356cdc..94ea66558 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -43,6 +43,7 @@ macro_rules! construct_fixed_hash { ( $(#[$attr:meta])* $visibility:vis struct $name:ident ( $n_bytes:expr ); ) => { #[repr(C)] $(#[$attr])* + #[derive(PartialEq, Eq)] $visibility struct $name (pub [u8; $n_bytes]); impl From<[u8; $n_bytes]> for $name { @@ -263,8 +264,6 @@ macro_rules! construct_fixed_hash { } } - impl $crate::core_::cmp::Eq for $name {} - impl $crate::core_::cmp::PartialOrd for $name { fn partial_cmp(&self, other: &Self) -> Option<$crate::core_::cmp::Ordering> { Some(self.cmp(other)) @@ -531,13 +530,6 @@ macro_rules! impl_rand_for_fixed_hash { #[doc(hidden)] macro_rules! 
impl_cmp_for_fixed_hash { ( $name:ident ) => { - impl $crate::core_::cmp::PartialEq for $name { - #[inline] - fn eq(&self, other: &Self) -> bool { - self.as_bytes() == other.as_bytes() - } - } - impl $crate::core_::cmp::Ord for $name { #[inline] fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index 5a5f5d94d..3ba8a6508 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -349,6 +349,15 @@ fn display_and_debug() { test_for(0x1000, "0000000000001000", "0000…1000"); } +#[test] +fn const_matching_works() { + const ONES: H32 = H32::repeat_byte(1); + match H32::repeat_byte(0) { + ONES => unreachable!(), + _ => {}, + } +} + mod ops { use super::*; From 4bcc5b230a7465971c89177d8eab2c30dfcd0a24 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 28 Apr 2023 13:59:00 +0200 Subject: [PATCH 314/359] Make `Const*` types `Clone+Default` and fix `Debug` (#744) * Impl Default, Clone and Debug Signed-off-by: Oliver Tale-Yazdi * Add tests Signed-off-by: Oliver Tale-Yazdi * Bump to 0.1.6 Signed-off-by: Oliver Tale-Yazdi * std-guard 'format' test Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- bounded-collections/CHANGELOG.md | 4 +++ bounded-collections/Cargo.toml | 2 +- bounded-collections/src/lib.rs | 11 ++++++- bounded-collections/src/test.rs | 50 ++++++++++++++++++++++++++++++++ 4 files changed, 65 insertions(+), 2 deletions(-) create mode 100644 bounded-collections/src/test.rs diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 8198520bf..42c43f84e 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.6] - 2023-04-27 +- Added `Clone` and `Default` derive to the `impl_const_get!` macro and thereby all `Const*` types. 
+- Fixed `Debug` impl for `impl_const_get!` and all `Const*` types to also print the value and not just the type name. + ## [0.1.5] - 2023-02-13 - Fixed `Hash` impl (previously it could not be used in practice, because the size bound was required to also implement `Hash`). diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index e5ae07808..5f3b9a03f 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.5" +version = "0.1.6" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index 401842c96..ee706da5f 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -18,6 +18,8 @@ pub mod bounded_btree_set; pub mod bounded_vec; pub mod weak_bounded_vec; +mod test; + pub use bounded_btree_map::BoundedBTreeMap; pub use bounded_btree_set::BoundedBTreeSet; pub use bounded_vec::{BoundedSlice, BoundedVec}; @@ -58,8 +60,15 @@ impl Get for GetDefault { macro_rules! impl_const_get { ($name:ident, $t:ty) => { /// Const getter for a basic type. 
- #[cfg_attr(feature = "std", derive(core::fmt::Debug))] + #[derive(Default, Clone)] pub struct $name; + + #[cfg(feature = "std")] + impl core::fmt::Debug for $name { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str(&format!("{}<{}>", stringify!($name), T)) + } + } #[cfg(not(feature = "std"))] impl core::fmt::Debug for $name { fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { diff --git a/bounded-collections/src/test.rs b/bounded-collections/src/test.rs new file mode 100644 index 000000000..285ad37e8 --- /dev/null +++ b/bounded-collections/src/test.rs @@ -0,0 +1,50 @@ +// Copyright 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tests for the `bounded-collections` crate. + +#![cfg(test)] + +use crate::*; +use core::fmt::Debug; + +#[test] +#[allow(path_statements)] +fn const_impl_default_clone_debug() { + struct ImplsDefault(T); + + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; + ImplsDefault::>; +} + +#[test] +#[cfg(feature = "std")] +fn const_debug_fmt() { + assert_eq!(format!("{:?}", ConstBool:: {}), "ConstBool"); + assert_eq!(format!("{:?}", ConstBool:: {}), "ConstBool"); + assert_eq!(format!("{:?}", ConstU8::<255> {}), "ConstU8<255>"); + assert_eq!(format!("{:?}", ConstU16::<50> {}), "ConstU16<50>"); + assert_eq!(format!("{:?}", ConstU32::<10> {}), "ConstU32<10>"); + assert_eq!(format!("{:?}", ConstU64::<99> {}), "ConstU64<99>"); + assert_eq!(format!("{:?}", ConstU128::<100> {}), "ConstU128<100>"); + assert_eq!(format!("{:?}", ConstI8::<-127> {}), "ConstI8<-127>"); + assert_eq!(format!("{:?}", ConstI16::<-50> {}), "ConstI16<-50>"); + assert_eq!(format!("{:?}", ConstI32::<-10> 
{}), "ConstI32<-10>"); + assert_eq!(format!("{:?}", ConstI64::<-99> {}), "ConstI64<-99>"); + assert_eq!(format!("{:?}", ConstI128::<-100> {}), "ConstI128<-100>"); +} From 3401582151d86c986a8294f2b8a1ecd93f391f0c Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 8 May 2023 12:46:33 +0200 Subject: [PATCH 315/359] `serde` feature added (#745) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * serde feature added Support for serde derivations in no_std. Part of: https://github.com/paritytech/substrate/issues/12994 * fixes * CI tests for serde, CHANGELOG updated * version changed to 0.1.7 * fix for ci command * Update bounded-collections/Cargo.toml Co-authored-by: Bastian Köcher --------- Co-authored-by: Bastian Köcher --- .github/workflows/ci.yml | 6 ++++++ bounded-collections/CHANGELOG.md | 3 +++ bounded-collections/Cargo.toml | 7 +++---- bounded-collections/src/bounded_vec.rs | 10 +++++----- bounded-collections/src/weak_bounded_vec.rs | 10 +++++----- 5 files changed, 22 insertions(+), 14 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f6c95932d..571ae99cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -104,6 +104,12 @@ jobs: command: test args: -p bounded-collections --no-default-features + - name: Test bounded-collections no_std,serde + uses: actions-rs/cargo@v1 + with: + command: test + args: -p bounded-collections --no-default-features --features=serde + - name: Test bounded-collections all-features uses: actions-rs/cargo@v1 with: diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 42c43f84e..7e1c40c5b 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.7] - 2023-05-05 +- Added `serde` feature, which can be enabled for no `std` deployments. + ## [0.1.6] - 2023-04-27 - Added `Clone` and `Default` derive to the `impl_const_get!` macro and thereby all `Const*` types. - Fixed `Debug` impl for `impl_const_get!` and all `Const*` types to also print the value and not just the type name. diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 5f3b9a03f..98b9f4c3d 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.6" +version = "0.1.7" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.60.0" [dependencies] -serde = { version = "1.0.101", default-features = false, optional = true } +serde = { version = "1.0.101", default-features = false, optional = true, features=["alloc", "derive"] } codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } log = { version = "0.4.17", default-features = false } @@ -23,6 +23,5 @@ std = [ "log/std", "codec/std", "scale-info/std", - "serde", - "serde/derive", + "serde/std", ] diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 33a192fec..fd617ee7d 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -27,7 +27,7 @@ use core::{ ops::{Deref, Index, IndexMut, RangeBounds}, slice::SliceIndex, }; -#[cfg(feature = "std")] +#[cfg(feature = "serde")] use serde::{ de::{Error, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, @@ -40,10 +40,10 @@ use serde::{ /// /// As the name suggests, the length of the queue is always bounded. 
All internal operations ensure /// this bound is respected. -#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] +#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] -pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData); +pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData); /// Create an object through truncation. pub trait TruncateFrom { @@ -51,7 +51,7 @@ pub trait TruncateFrom { fn truncate_from(unbound: T) -> Self; } -#[cfg(feature = "std")] +#[cfg(feature = "serde")] impl<'de, T, S: Get> Deserialize<'de> for BoundedVec where T: Deserialize<'de>, @@ -68,7 +68,7 @@ where { type Value = Vec; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + fn expecting(&self, formatter: &mut alloc::fmt::Formatter) -> alloc::fmt::Result { formatter.write_str("a sequence") } diff --git a/bounded-collections/src/weak_bounded_vec.rs b/bounded-collections/src/weak_bounded_vec.rs index cb711d760..de5fa7128 100644 --- a/bounded-collections/src/weak_bounded_vec.rs +++ b/bounded-collections/src/weak_bounded_vec.rs @@ -27,7 +27,7 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -#[cfg(feature = "std")] +#[cfg(feature = "serde")] use serde::{ de::{Error, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, @@ -40,15 +40,15 @@ use serde::{ /// /// The length of the vec is not strictly bounded. Decoding a vec with more element that the bound /// is accepted, and some method allow to bypass the restriction with warnings. 
-#[cfg_attr(feature = "std", derive(Serialize), serde(transparent))] +#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] pub struct WeakBoundedVec( pub(super) Vec, - #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData, + #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData, ); -#[cfg(feature = "std")] +#[cfg(feature = "serde")] impl<'de, T, S: Get> Deserialize<'de> for WeakBoundedVec where T: Deserialize<'de>, @@ -65,7 +65,7 @@ where { type Value = Vec; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + fn expecting(&self, formatter: &mut alloc::fmt::Formatter) -> alloc::fmt::Result { formatter.write_str("a sequence") } From bd9d3e0bafd575078cacc9e4f5b8f93477ccbdae Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 8 May 2023 22:54:59 +0200 Subject: [PATCH 316/359] Remove-manual-typeinfo (#748) --- bounded-collections/src/bounded_vec.rs | 34 ++------------------------ 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index fd617ee7d..1f28b73fc 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -20,7 +20,7 @@ use super::WeakBoundedVec; use crate::{Get, TryCollect}; -use alloc::{boxed::Box, vec::Vec}; +use alloc::vec::Vec; use codec::{decode_vec_with_len, Compact, Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ marker::PhantomData, @@ -108,39 +108,9 @@ where /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. 
-#[derive(Encode)] +#[derive(Encode, scale_info::TypeInfo)] pub struct BoundedSlice<'a, T, S>(pub(super) &'a [T], PhantomData); -// This can be replaced with -// #[derive(scale_info::TypeInfo)] -// #[scale_info(skip_type_params(S))] -// again once this issue is fixed in the rust compiler: https://github.com/rust-lang/rust/issues/96956 -// Tracking issues: https://github.com/paritytech/substrate/issues/11915 -impl<'a, T, S> scale_info::TypeInfo for BoundedSlice<'a, T, S> -where - &'a [T]: scale_info::TypeInfo + 'static, - PhantomData: scale_info::TypeInfo + 'static, - T: scale_info::TypeInfo + 'static, - S: 'static, -{ - type Identity = Self; - - fn type_info() -> ::scale_info::Type { - scale_info::Type::builder() - .path(scale_info::Path::new("BoundedSlice", "sp_runtime::bounded::bounded_vec")) - .type_params(<[_]>::into_vec(Box::new([ - scale_info::TypeParameter::new("T", core::option::Option::Some(::scale_info::meta_type::())), - scale_info::TypeParameter::new("S", ::core::option::Option::None), - ]))) - .docs(&["A bounded slice.", "", "Similar to a `BoundedVec`, but not owned and cannot be decoded."]) - .composite( - scale_info::build::Fields::unnamed() - .field(|f| f.ty::<&'static [T]>().type_name("&'static[T]").docs(&[])) - .field(|f| f.ty::>().type_name("PhantomData").docs(&[])), - ) - } -} - // `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, // `WeakBoundedVec`, or a `Vec`. 
impl<'a, T: Encode + Decode, S: Get> EncodeLike> for BoundedSlice<'a, T, S> {} From 9fa489cc90de47f907352015ad861c88b189903c Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 10 May 2023 10:22:42 +0300 Subject: [PATCH 317/359] Update rockdb to 0.21 (#750) * Update rockdb to 0.21 * Fix a compilation error * Bump version and update changelog --- kvdb-rocksdb/CHANGELOG.md | 3 +++ kvdb-rocksdb/Cargo.toml | 6 +++--- kvdb-rocksdb/src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/kvdb-rocksdb/CHANGELOG.md b/kvdb-rocksdb/CHANGELOG.md index 1a5d2e775..d3426aec6 100644 --- a/kvdb-rocksdb/CHANGELOG.md +++ b/kvdb-rocksdb/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.19.0] - 2023-05-10 +- Updated `rocksdb` to 0.21. [#750](https://github.com/paritytech/parity-common/pull/750) + ## [0.18.0] - 2023-04-21 - Updated `rocksdb` to 0.20.1. [#743](https://github.com/paritytech/parity-common/pull/743) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index a1c8d41b0..bd61de957 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "kvdb-rocksdb" -version = "0.18.0" +version = "0.19.0" authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" description = "kvdb implementation backed by RocksDB" @@ -24,12 +24,12 @@ regex = "1.3.1" [target.'cfg(any(target_os = "openbsd", target_env = "msvc"))'.dependencies.rocksdb] default-features = false features = ["snappy"] -version = "0.20.1" +version = "0.21" [target.'cfg(not(any(target_os = "openbsd", target_env = "msvc")))'.dependencies.rocksdb] default-features = false features = ["snappy", "jemalloc"] -version = "0.20.1" +version = "0.21" [dev-dependencies] alloc_counter = "0.0.4" diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index aa55d7445..0af25f6b9 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -314,7 +314,7 @@ fn 
generate_block_based_options(config: &DatabaseConfig) -> io::Result Date: Wed, 10 May 2023 10:11:43 +0200 Subject: [PATCH 318/359] build(deps): update sysinfo requirement from 0.28.1 to 0.29.0 (#747) Updates the requirements on [sysinfo](https://github.com/GuillaumeGomez/sysinfo) to permit the latest version. - [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md) - [Commits](https://github.com/GuillaumeGomez/sysinfo/commits) --- updated-dependencies: - dependency-name: sysinfo dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- kvdb-rocksdb/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index bd61de957..e5180f5bd 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -39,6 +39,6 @@ kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.28.1" +sysinfo = "0.29.0" ctrlc = "3.1.4" chrono = "0.4" From a5ef7308d6986e62431e35d3156fed0a7a585d39 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Mon, 12 Jun 2023 06:40:21 +0100 Subject: [PATCH 319/359] Fallible element pushers should return unused element (#758) * Fallible element pushers should return unused element * 0.1.7 release prep * Fixes --------- Co-authored-by: Keith Yeung --- bounded-collections/CHANGELOG.md | 4 +++ bounded-collections/Cargo.toml | 2 +- bounded-collections/src/bounded_vec.rs | 44 ++++++++++++++++---------- 3 files changed, 32 insertions(+), 18 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 7e1c40c5b..4ab3266d4 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,10 @@ The format is based on [Keep a Changelog]. 
[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.8] - 2023-06-11 +- Altered return types of `BoundedVec::force_insert_keep_` functions to return the element in case of error. +- Added `new` and `clear` to `BoundedVec`. + ## [0.1.7] - 2023-05-05 - Added `serde` feature, which can be enabled for no `std` deployments. diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 98b9f4c3d..17a9ecb47 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.7" +version = "0.1.8" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 1f28b73fc..2f87d3de2 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -288,11 +288,21 @@ impl> Decode for BoundedVec { impl> EncodeLike> for BoundedVec {} impl BoundedVec { + /// Create `Self` with no items. + pub fn new() -> Self { + Self(Vec::new(), Default::default()) + } + /// Create `Self` from `t` without any checks. fn unchecked_from(t: Vec) -> Self { Self(t, Default::default()) } + /// Exactly the same semantics as `Vec::clear`. + pub fn clear(&mut self) { + self.0.clear() + } + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can /// be used. @@ -439,19 +449,19 @@ impl> BoundedVec { /// If `Self::bound() < index` or `self.len() < index`, then this is also a no-op. /// /// Returns `Ok(maybe_removed)` if the item was inserted, where `maybe_removed` is - /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(())` if - /// `element` cannot be inserted. 
- pub fn force_insert_keep_right(&mut self, index: usize, mut element: T) -> Result, ()> { + /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(element)` + /// if `element` cannot be inserted. + pub fn force_insert_keep_right(&mut self, index: usize, mut element: T) -> Result, T> { // Check against panics. if Self::bound() < index || self.len() < index { - Err(()) + Err(element) } else if self.len() < Self::bound() { // Cannot panic since self.len() >= index; self.0.insert(index, element); Ok(None) } else { if index == 0 { - return Err(()) + return Err(element) } core::mem::swap(&mut self[0], &mut element); // `[0..index] cannot panic since self.len() >= index. @@ -469,16 +479,16 @@ impl> BoundedVec { /// If `Self::bound() < index` or `self.len() < index`, then this is also a no-op. /// /// Returns `Ok(maybe_removed)` if the item was inserted, where `maybe_removed` is - /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(())` if - /// `element` cannot be inserted. - pub fn force_insert_keep_left(&mut self, index: usize, element: T) -> Result, ()> { + /// `Some(removed)` if an item was removed to make room for the new one. Returns `Err(element)` + /// if `element` cannot be inserted. + pub fn force_insert_keep_left(&mut self, index: usize, element: T) -> Result, T> { // Check against panics. if Self::bound() < index || self.len() < index || Self::bound() == 0 { - return Err(()) + return Err(element) } // Noop condition. if Self::bound() == index && self.len() <= Self::bound() { - return Err(()) + return Err(element) } let maybe_removed = if self.is_full() { // defensive-only: since we are at capacity, this is a noop. 
@@ -957,7 +967,7 @@ mod test { #[test] fn force_insert_keep_left_works() { let mut b: BoundedVec> = bounded_vec![]; - assert_eq!(b.force_insert_keep_left(1, 10), Err(())); + assert_eq!(b.force_insert_keep_left(1, 10), Err(10)); assert!(b.is_empty()); assert_eq!(b.force_insert_keep_left(0, 30), Ok(None)); @@ -966,7 +976,7 @@ mod test { assert_eq!(b.force_insert_keep_left(3, 40), Ok(None)); assert_eq!(*b, vec![10, 20, 30, 40]); // at capacity. - assert_eq!(b.force_insert_keep_left(4, 41), Err(())); + assert_eq!(b.force_insert_keep_left(4, 41), Err(41)); assert_eq!(*b, vec![10, 20, 30, 40]); assert_eq!(b.force_insert_keep_left(3, 31), Ok(Some(40))); assert_eq!(*b, vec![10, 20, 30, 31]); @@ -977,14 +987,14 @@ mod test { let mut z: BoundedVec> = bounded_vec![]; assert!(z.is_empty()); - assert_eq!(z.force_insert_keep_left(0, 10), Err(())); + assert_eq!(z.force_insert_keep_left(0, 10), Err(10)); assert!(z.is_empty()); } #[test] fn force_insert_keep_right_works() { let mut b: BoundedVec> = bounded_vec![]; - assert_eq!(b.force_insert_keep_right(1, 10), Err(())); + assert_eq!(b.force_insert_keep_right(1, 10), Err(10)); assert!(b.is_empty()); assert_eq!(b.force_insert_keep_right(0, 30), Ok(None)); @@ -994,7 +1004,7 @@ mod test { assert_eq!(*b, vec![10, 20, 30, 40]); // at capacity. 
- assert_eq!(b.force_insert_keep_right(0, 0), Err(())); + assert_eq!(b.force_insert_keep_right(0, 0), Err(0)); assert_eq!(*b, vec![10, 20, 30, 40]); assert_eq!(b.force_insert_keep_right(1, 11), Ok(Some(10))); assert_eq!(*b, vec![11, 20, 30, 40]); @@ -1003,12 +1013,12 @@ mod test { assert_eq!(b.force_insert_keep_right(4, 41), Ok(Some(20))); assert_eq!(*b, vec![30, 31, 40, 41]); - assert_eq!(b.force_insert_keep_right(5, 69), Err(())); + assert_eq!(b.force_insert_keep_right(5, 69), Err(69)); assert_eq!(*b, vec![30, 31, 40, 41]); let mut z: BoundedVec> = bounded_vec![]; assert!(z.is_empty()); - assert_eq!(z.force_insert_keep_right(0, 10), Err(())); + assert_eq!(z.force_insert_keep_right(0, 10), Err(10)); assert!(z.is_empty()); } From db9940dc944eb1bf4fcff7d74bb700d37451dfb4 Mon Sep 17 00:00:00 2001 From: Jason Carver Date: Mon, 10 Jul 2023 06:26:57 -0700 Subject: [PATCH 320/359] doc: unsigned int byte() order (#732) --- uint/src/uint.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/src/uint.rs b/uint/src/uint.rs index 2914f7223..bc4b7416c 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -732,7 +732,7 @@ macro_rules! construct_uint { r } - /// Return specific byte. + /// Return specific byte. Byte 0 is the least significant value (ie~ little endian). /// /// # Panics /// From dce4bbd209e0d4ac55447b542fd329796ac7e2bb Mon Sep 17 00:00:00 2001 From: Jonathan Underwood Date: Wed, 12 Jul 2023 18:42:37 +0900 Subject: [PATCH 321/359] Feature: Make U256::to_f64_lossy more accurate (#726) --- primitive-types/src/fp_conversion.rs | 35 +++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/primitive-types/src/fp_conversion.rs b/primitive-types/src/fp_conversion.rs index cfc51d279..24f29905d 100644 --- a/primitive-types/src/fp_conversion.rs +++ b/primitive-types/src/fp_conversion.rs @@ -33,11 +33,34 @@ impl U256 { /// Lossy conversion of `U256` to `f64`. 
pub fn to_f64_lossy(self) -> f64 { - let (res, factor) = match self { - U256([_, _, 0, 0]) => (self, 1.0), - U256([_, _, _, 0]) => (self >> 64, 2.0f64.powi(64)), - U256([_, _, _, _]) => (self >> 128, 2.0f64.powi(128)), - }; - (res.low_u128() as f64) * factor + // Reference: https://blog.m-ou.se/floats/ + // Step 1: Get leading zeroes + let leading_zeroes = self.leading_zeros(); + // Step 2: Get msb to be farthest left bit + let left_aligned = self << leading_zeroes; + // Step 3: Shift msb to fit in lower 53 bits of the first u64 (64-53=11) + let quarter_aligned = left_aligned >> 11; + let mantissa = quarter_aligned.0[3]; + // Step 4: For the dropped bits (all bits beyond the 53 most significant + // We want to know only 2 things. If the msb of the dropped bits is 1 or 0, + // and if any of the other bits are 1. (See blog for explanation) + // So we take care to preserve the msb bit, while jumbling the rest of the bits + // together so that any 1s will survive. If all 0s, then the result will also be 0. + let dropped_bits = quarter_aligned.0[1] | quarter_aligned.0[0] | (left_aligned.0[0] & 0xFFFF_FFFF); + let dropped_bits = (dropped_bits & 0x7FFF_FFFF_FFFF_FFFF) | (dropped_bits >> 63); + let dropped_bits = quarter_aligned.0[2] | dropped_bits; + // Step 5: dropped_bits contains the msb of the original bits and an OR-mixed 63 bits. 
+ // If msb of dropped bits is 0, it is mantissa + 0 + // If msb of dropped bits is 1, it is mantissa + 0 only if mantissa lowest bit is 0 + // and other bits of the dropped bits are all 0 (which both can be tested with the below all at once) + let mantissa = mantissa + ((dropped_bits - (dropped_bits >> 63 & !mantissa)) >> 63); + // Step 6: Calculate the exponent + // If self is 0, exponent should be 0 (special meaning) and mantissa will end up 0 too + // Otherwise, (255 - n) + 1022 so it simplifies to 1277 - n + // 1023 and 1022 are the cutoffs for the exponent having the msb next to the decimal point + let exponent = if self.is_zero() { 0 } else { 1277 - leading_zeroes as u64 }; + // Step 7: sign bit is always 0, exponent is shifted into place + // Use addition instead of bitwise OR to saturate the exponent if mantissa overflows + f64::from_bits((exponent << 52) + mantissa) } } From 0f96b34651d1b26002cac223e038088a962fd6d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jul 2023 11:42:52 +0200 Subject: [PATCH 322/359] build(deps): update criterion requirement from 0.4.0 to 0.5.1 (#757) Updates the requirements on [criterion](https://github.com/bheisler/criterion.rs) to permit the latest version. - [Changelog](https://github.com/bheisler/criterion.rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/bheisler/criterion.rs/compare/0.4.0...0.5.1) --- updated-dependencies: - dependency-name: criterion dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ethbloom/Cargo.toml | 2 +- fixed-hash/Cargo.toml | 2 +- keccak-hash/Cargo.toml | 2 +- kvdb-rocksdb/Cargo.toml | 2 +- rlp/Cargo.toml | 2 +- uint/Cargo.toml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 56c0ef747..fdc8cf8dc 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -20,7 +20,7 @@ impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", defau scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] -criterion = "0.4.0" +criterion = "0.5.1" rand = "0.8.0" hex-literal = "0.4.1" diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index f878d513a..337dad706 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -24,7 +24,7 @@ arbitrary = { version = "1.0", optional = true } [dev-dependencies] rand_xorshift = "0.3.0" -criterion = "0.4.0" +criterion = "0.5.1" rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } [features] diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index d5b5797a8..0ea24892d 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -15,7 +15,7 @@ primitive-types = { path = "../primitive-types", version = "0.12", default-featu [dev-dependencies] tempfile = "3.1.0" -criterion = "0.4.0" +criterion = "0.5.1" [features] default = ["std"] diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index e5180f5bd..257f45c9c 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -33,7 +33,7 @@ version = "0.21" [dev-dependencies] alloc_counter = "0.0.4" -criterion = "0.4" +criterion = "0.5" ethereum-types = { path = "../ethereum-types" } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } rand = "0.8.0" diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 
a9079437c..55d7eaf4b 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -14,7 +14,7 @@ rustc-hex = { version = "2.0.1", default-features = false } rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] -criterion = "0.4.0" +criterion = "0.5.1" hex-literal = "0.4.1" primitive-types = { path = "../primitive-types", version = "0.12", features = ["impl-rlp"] } diff --git a/uint/Cargo.toml b/uint/Cargo.toml index b1d2e8752..36f3de1b2 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -30,7 +30,7 @@ name = "uint_tests" required-features = ["std"] [dev-dependencies] -criterion = "0.4.0" +criterion = "0.5.1" num-bigint = "0.4.0" [target.'cfg(all(unix, target_arch = "x86_64"))'.dev-dependencies] From b0c12c2490e2f686c9ee0bbabad811a2c747be47 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jul 2023 11:43:05 +0200 Subject: [PATCH 323/359] build(deps): bump Swatinem/rust-cache from 2.2.1 to 2.5.1 (#763) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.2.1 to 2.5.1. - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/6fd3edff6979b79f87531400ad694fb7f2c84b1f...dd05243424bd5c0e585e4b55eb2d7615cdd32f1f) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 571ae99cc..3a57a6068 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1 - run: rustup target add wasm32-unknown-unknown @@ -136,7 +136,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1 - uses: actions-rs/cargo@v1 with: From 7194def73feb7d97644303f1a6ddbab29bbb799f Mon Sep 17 00:00:00 2001 From: achillelamb <58937437+achillelamb@users.noreply.github.com> Date: Wed, 12 Jul 2023 11:43:18 +0200 Subject: [PATCH 324/359] fix(rlp): `len()` changes as `RlpIterator` is consumed (#766) Fixes #761 --- rlp/src/rlpin.rs | 2 +- rlp/tests/tests.rs | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/rlp/src/rlpin.rs b/rlp/src/rlpin.rs index 53b8731fc..395808fa8 100644 --- a/rlp/src/rlpin.rs +++ b/rlp/src/rlpin.rs @@ -367,7 +367,7 @@ impl<'a, 'view> Iterator for RlpIterator<'a, 'view> { impl<'a, 'view> ExactSizeIterator for RlpIterator<'a, 'view> { fn len(&self) -> usize { - self.rlp.item_count().unwrap_or(0) + self.rlp.item_count().unwrap_or(0).saturating_sub(self.index) } } diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index a5eface28..203397393 100644 --- 
a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -105,14 +105,20 @@ fn rlp_iter() { let rlp = Rlp::new(&data); let mut iter = rlp.iter(); + assert_eq!(iter.len(), 2); + let cat = iter.next().unwrap(); assert!(cat.is_data()); assert_eq!(cat.as_raw(), &[0x83, b'c', b'a', b't']); + assert_eq!(iter.len(), 1); + let dog = iter.next().unwrap(); assert!(dog.is_data()); assert_eq!(dog.as_raw(), &[0x83, b'd', b'o', b'g']); + assert_eq!(iter.len(), 0); + let none = iter.next(); assert!(none.is_none()); From d3a9327124a66e52ca1114bb8640c02c18c134b8 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 12 Jul 2023 13:34:58 +0300 Subject: [PATCH 325/359] reuse Copy trait in hash::clone() (#767) --- fixed-hash/src/hash.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 94ea66558..20f3864ef 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -258,9 +258,7 @@ macro_rules! construct_fixed_hash { #[cfg_attr(feature = "dev", allow(expl_impl_clone_on_copy))] impl $crate::core_::clone::Clone for $name { fn clone(&self) -> $name { - let mut ret = $name::zero(); - ret.0.copy_from_slice(&self.0); - ret + *self } } From f9cfc79fdd25b0bb0b96b7265055e82f932f299c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Aug 2023 14:29:32 +1000 Subject: [PATCH 326/359] build(deps): bump Swatinem/rust-cache from 2.5.1 to 2.6.2 (#778) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.5.1 to 2.6.2. 
- [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/dd05243424bd5c0e585e4b55eb2d7615cdd32f1f...e207df5d269b42b69c8bc5101da26f7d31feddb4) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3a57a6068..81de60cc7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1 + uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1 + uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 - run: rustup target add wasm32-unknown-unknown @@ -136,7 +136,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@dd05243424bd5c0e585e4b55eb2d7615cdd32f1f # v2.5.1 + uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 - uses: actions-rs/cargo@v1 with: From a8a85a4080d2906564222278d1c694c1edfb7c81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=96zg=C3=BCn=20=C3=96zerk?= Date: Thu, 31 Aug 2023 17:03:05 +0300 Subject: [PATCH 327/359] Deserialize boundedbtreeset (#781) * deserialize for btreeset * tests added * run tests under std for bounded-btree-set * insert after bound check, new tests for bound check --- bounded-collections/src/bounded_btree_set.rs | 112 ++++++++++++++++++- 
bounded-collections/src/bounded_vec.rs | 17 ++- 2 files changed, 123 insertions(+), 6 deletions(-) diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index 3322cb720..c4eb255e6 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -21,6 +21,11 @@ use crate::{Get, TryCollect}; use alloc::collections::BTreeSet; use codec::{Compact, Decode, Encode, MaxEncodedLen}; use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; +#[cfg(feature = "serde")] +use serde::{ + de::{Error, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; /// A bounded set based on a B-Tree. /// @@ -29,9 +34,67 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the /// set. All internal operations ensure this bound is respected. +#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] -pub struct BoundedBTreeSet(BTreeSet, PhantomData); +pub struct BoundedBTreeSet(BTreeSet, #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData); + +#[cfg(feature = "serde")] +impl<'de, T, S: Get> Deserialize<'de> for BoundedBTreeSet +where + T: Ord + Deserialize<'de>, + S: Clone, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // Create a visitor to visit each element in the sequence + struct BTreeSetVisitor(std::marker::PhantomData<(T, S)>); + + impl<'de, T, S> Visitor<'de> for BTreeSetVisitor + where + T: Ord + Deserialize<'de>, + S: Get + Clone, + { + type Value = BTreeSet; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let size = seq.size_hint().unwrap_or(0); + let max = match 
usize::try_from(S::get()) { + Ok(n) => n, + Err(_) => return Err(A::Error::custom("can't convert to usize")), + }; + if size > max { + Err(A::Error::custom("out of bounds")) + } else { + let mut values = BTreeSet::new(); + + while let Some(value) = seq.next_element()? { + if values.len() >= max { + return Err(A::Error::custom("out of bounds")) + } + values.insert(value); + } + + Ok(values) + } + } + } + + let visitor: BTreeSetVisitor = BTreeSetVisitor(PhantomData); + deserializer + .deserialize_seq(visitor) + .map(|v| BoundedBTreeSet::::try_from(v).map_err(|_| Error::custom("out of bounds")))? + } +} impl Decode for BoundedBTreeSet where @@ -333,7 +396,7 @@ where } } -#[cfg(test)] +#[cfg(all(test, feature = "std"))] mod test { use super::*; use crate::ConstU32; @@ -522,4 +585,49 @@ mod test { set: BoundedBTreeSet>, } } + + #[test] + fn test_serializer() { + let mut c = BoundedBTreeSet::>::new(); + c.try_insert(0).unwrap(); + c.try_insert(1).unwrap(); + c.try_insert(2).unwrap(); + + assert_eq!(serde_json::json!(&c).to_string(), r#"[0,1,2]"#); + } + + #[test] + fn test_deserializer() { + let c: Result>, serde_json::error::Error> = serde_json::from_str(r#"[0,1,2]"#); + assert!(c.is_ok()); + let c = c.unwrap(); + + assert_eq!(c.len(), 3); + assert!(c.contains(&0)); + assert!(c.contains(&1)); + assert!(c.contains(&2)); + } + + #[test] + fn test_deserializer_bound() { + let c: Result>, serde_json::error::Error> = serde_json::from_str(r#"[0,1,2]"#); + assert!(c.is_ok()); + let c = c.unwrap(); + + assert_eq!(c.len(), 3); + assert!(c.contains(&0)); + assert!(c.contains(&1)); + assert!(c.contains(&2)); + } + + #[test] + fn test_deserializer_failed() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2,3,4]"#); + + match c { + Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), + _ => unreachable!("deserializer must raise error"), + } + } } diff --git a/bounded-collections/src/bounded_vec.rs 
b/bounded-collections/src/bounded_vec.rs index 2f87d3de2..cd26afb1d 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -87,10 +87,10 @@ where let mut values = Vec::with_capacity(size); while let Some(value) = seq.next_element()? { - values.push(value); - if values.len() > max { + if values.len() >= max { return Err(A::Error::custom("out of bounds")) } + values.push(value); } Ok(values) @@ -1187,10 +1187,19 @@ mod test { assert_eq!(c[2], 2); } + #[test] + fn test_deserializer_bound() { + let c: BoundedVec> = serde_json::from_str(r#"[0,1,2]"#).unwrap(); + + assert_eq!(c.len(), 3); + assert_eq!(c[0], 0); + assert_eq!(c[1], 1); + assert_eq!(c[2], 2); + } + #[test] fn test_deserializer_failed() { - let c: Result>, serde_json::error::Error> = - serde_json::from_str(r#"[0,1,2,3,4,5]"#); + let c: Result>, serde_json::error::Error> = serde_json::from_str(r#"[0,1,2,3,4]"#); match c { Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), From 7b41a0432bec5ae32326a1a96aa76f9674d8fc62 Mon Sep 17 00:00:00 2001 From: ordian Date: Sun, 17 Sep 2023 21:44:59 +0200 Subject: [PATCH 328/359] bounded-collections: fix build for no_std + serde (#789) * bounded-collections: fix build for no_std + serde * put serde test under feature * try something * try something else * try without --- .github/workflows/ci.yml | 3 +- bounded-collections/src/bounded_btree_set.rs | 86 +++++++++++--------- 2 files changed, 49 insertions(+), 40 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81de60cc7..57da5eff6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,6 +46,7 @@ jobs: uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 - run: rustup target add wasm32-unknown-unknown + - run: rustup target add mips64-unknown-linux-muslabi64 - name: Test no-default-features uses: actions-rs/cargo@v1 @@ -122,7 +123,7 @@ jobs: with: use-cross: true command: test - 
args: -p uint --target=mips64-unknown-linux-gnuabi64 + args: -p uint --target=mips64-unknown-linux-muslabi64 test_windows: name: Test Windows diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index c4eb255e6..c966bce8a 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -50,7 +50,7 @@ where D: Deserializer<'de>, { // Create a visitor to visit each element in the sequence - struct BTreeSetVisitor(std::marker::PhantomData<(T, S)>); + struct BTreeSetVisitor(PhantomData<(T, S)>); impl<'de, T, S> Visitor<'de> for BTreeSetVisitor where @@ -59,7 +59,7 @@ where { type Value = BTreeSet; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { formatter.write_str("a sequence") } @@ -396,7 +396,7 @@ where } } -#[cfg(all(test, feature = "std"))] +#[cfg(test)] mod test { use super::*; use crate::ConstU32; @@ -586,48 +586,56 @@ mod test { } } - #[test] - fn test_serializer() { - let mut c = BoundedBTreeSet::>::new(); - c.try_insert(0).unwrap(); - c.try_insert(1).unwrap(); - c.try_insert(2).unwrap(); - - assert_eq!(serde_json::json!(&c).to_string(), r#"[0,1,2]"#); - } + #[cfg(feature = "serde")] + mod serde { + use super::*; + use crate::alloc::string::ToString as _; - #[test] - fn test_deserializer() { - let c: Result>, serde_json::error::Error> = serde_json::from_str(r#"[0,1,2]"#); - assert!(c.is_ok()); - let c = c.unwrap(); + #[test] + fn test_serializer() { + let mut c = BoundedBTreeSet::>::new(); + c.try_insert(0).unwrap(); + c.try_insert(1).unwrap(); + c.try_insert(2).unwrap(); - assert_eq!(c.len(), 3); - assert!(c.contains(&0)); - assert!(c.contains(&1)); - assert!(c.contains(&2)); - } + assert_eq!(serde_json::json!(&c).to_string(), r#"[0,1,2]"#); + } - #[test] - fn test_deserializer_bound() { - let c: Result>, serde_json::error::Error> = 
serde_json::from_str(r#"[0,1,2]"#); - assert!(c.is_ok()); - let c = c.unwrap(); + #[test] + fn test_deserializer() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2]"#); + assert!(c.is_ok()); + let c = c.unwrap(); + + assert_eq!(c.len(), 3); + assert!(c.contains(&0)); + assert!(c.contains(&1)); + assert!(c.contains(&2)); + } - assert_eq!(c.len(), 3); - assert!(c.contains(&0)); - assert!(c.contains(&1)); - assert!(c.contains(&2)); - } + #[test] + fn test_deserializer_bound() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2]"#); + assert!(c.is_ok()); + let c = c.unwrap(); + + assert_eq!(c.len(), 3); + assert!(c.contains(&0)); + assert!(c.contains(&1)); + assert!(c.contains(&2)); + } - #[test] - fn test_deserializer_failed() { - let c: Result>, serde_json::error::Error> = - serde_json::from_str(r#"[0,1,2,3,4]"#); + #[test] + fn test_deserializer_failed() { + let c: Result>, serde_json::error::Error> = + serde_json::from_str(r#"[0,1,2,3,4]"#); - match c { - Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), - _ => unreachable!("deserializer must raise error"), + match c { + Err(msg) => assert_eq!(msg.to_string(), "out of bounds at line 1 column 11"), + _ => unreachable!("deserializer must raise error"), + } } } } From 6ba56691d1b8efabd9bed115d146372c7843476c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 22:04:05 +0200 Subject: [PATCH 329/359] build(deps): bump Swatinem/rust-cache from 2.6.2 to 2.7.0 (#787) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.6.2 to 2.7.0. 
- [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/e207df5d269b42b69c8bc5101da26f7d31feddb4...a95ba195448af2da9b00fb742d14ffaaf3c21f43) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 57da5eff6..691fc798c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 + uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 + uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 - run: rustup target add wasm32-unknown-unknown - run: rustup target add mips64-unknown-linux-muslabi64 @@ -137,7 +137,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@e207df5d269b42b69c8bc5101da26f7d31feddb4 # v2.6.2 + uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 - uses: actions-rs/cargo@v1 with: From 97973e559b3dced3c60d3ca3b5338b60564b6fe2 Mon Sep 17 00:00:00 2001 From: dzmitry-lahoda Date: Mon, 9 Oct 2023 20:40:04 +0100 Subject: [PATCH 330/359] feat(schema): added json schema to support schema on xcm messages (#785) * schemars * comment * clean up, aligned version of schema with CosmWasm for now * Update bounded-collections/src/bounded_vec.rs Co-authored-by: ordian 
--------- Co-authored-by: ordian --- bounded-collections/Cargo.toml | 2 ++ bounded-collections/src/bounded_vec.rs | 1 + primitive-types/Cargo.toml | 2 ++ primitive-types/src/lib.rs | 17 +++++++++++++++++ 4 files changed, 22 insertions(+) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 17a9ecb47..fb14da382 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -13,12 +13,14 @@ serde = { version = "1.0.101", default-features = false, optional = true, featur codec = { version = "3.3.0", default-features = false, features = ["max-encoded-len"], package = "parity-scale-codec" } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false } log = { version = "0.4.17", default-features = false } +schemars = { version = ">=0.8.12", default-features = true, optional = true } [dev-dependencies] serde_json = "1.0.41" [features] default = ["std"] +json-schema = ["dep:schemars"] std = [ "log/std", "codec/std", diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index cd26afb1d..3d4269956 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -43,6 +43,7 @@ use serde::{ #[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] pub struct BoundedVec(pub(super) Vec, #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData); /// Create an object through truncation. 
diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 7aa3d2e9c..d642effc6 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -16,6 +16,7 @@ impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } scale-info-crate = { package = "scale-info", version = ">=0.9, <3", features = ["derive"], default-features = false, optional = true } +schemars = { version = ">=0.8.12", default-features = true, optional = true } [dev-dependencies] num-traits = "0.2" @@ -26,6 +27,7 @@ std = ["uint/std", "fixed-hash/std", "impl-codec?/std"] byteorder = ["fixed-hash/byteorder"] rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde", "impl-serde/std"] +json-schema = ["dep:schemars"] serde_no_std = ["impl-serde"] codec = ["impl-codec"] scale-info = ["codec", "scale-info-crate"] diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index dd372a9eb..d80a312f8 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -105,6 +105,23 @@ mod serde { impl_fixed_hash_serde!(H768, 96); } +// true that no need std, but need to do no_std alloc than, so simplified for now +// also no macro, but easy to create +#[cfg(all(feature = "std", feature = "json-schema"))] +mod json_schema { + use super::*; + + impl schemars::JsonSchema for H160 { + fn schema_name() -> String { + "0xPrefixedHexString".to_string() + } + + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + String::json_schema(gen) + } + } +} + #[cfg(feature = "impl-codec")] mod codec { use super::*; From 400398a7ea5de6ecb8b64ce7da2c8c96aa3ec1a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:54:12 +0200 Subject: [PATCH 
331/359] build(deps): bump actions/checkout from 3 to 4 (#783) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 691fc798c..e89577690 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ jobs: name: Check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -35,7 +35,7 @@ jobs: - ubuntu-latest - macOS-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -129,7 +129,7 @@ jobs: name: Test Windows runs-on: windows-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -149,7 +149,7 @@ jobs: name: Rustfmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal From b4e1516a95c7e7c0bde66d03dbea6f388e7c5fec Mon Sep 17 00:00:00 2001 From: ordian Date: Tue, 10 Oct 2023 14:13:44 +0200 Subject: [PATCH 332/359] release primitive-types and bounded-collections (#794) * primitive-types: release 0.12.2 * bounded-collections: release 0.1.9 --- bounded-collections/CHANGELOG.md | 3 +++ bounded-collections/Cargo.toml | 2 +- primitive-types/CHANGELOG.md | 5 ++++- 
primitive-types/Cargo.toml | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 4ab3266d4..a76dffed1 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.1.9] - 2023-10-10 +- Added `serde` support for `BoundedBTreeSet`. [#781](https://github.com/paritytech/parity-common/pull/781) + ## [0.1.8] - 2023-06-11 - Altered return types of `BoundedVec::force_insert_keep_` functions to return the element in case of error. - Added `new` and `clear` to `BoundedVec`. diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index fb14da382..d74943398 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.8" +version = "0.1.9" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index e33e0cd5f..5545d482d 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,7 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.12.1] - 2022-20-27 +## [0.12.2] - 2023-10-10 +- Added `schemars` support via `json-schema` feature. [#785](https://github.com/paritytech/parity-common/pull/785) + +## [0.12.1] - 2022-10-27 - Added `H384` and `H768` types. 
[#684](https://github.com/paritytech/parity-common/pull/684) ## [0.12.0] - 2022-09-20 diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index d642effc6..126b17dca 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" From 3a35610d5530201e3689af511269b02e2e0b9cd6 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 28 Nov 2023 23:27:27 +0100 Subject: [PATCH 333/359] Add `BoundedVec::try_rotate_{left,right}` (#800) * Add BoundedVec::try_rotate_{left,right} Signed-off-by: Oliver Tale-Yazdi * Update changelog Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- bounded-collections/CHANGELOG.md | 3 ++ bounded-collections/src/bounded_vec.rs | 58 ++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index a76dffed1..744f41c6d 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.2.0] - 2023-11-13 +- Added `try_rotate_left` and `try_rotate_right` to `BoundedVec`. [#800](https://github.com/paritytech/parity-common/pull/800) + ## [0.1.9] - 2023-10-10 - Added `serde` support for `BoundedBTreeSet`. 
[#781](https://github.com/paritytech/parity-common/pull/781) diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 3d4269956..c8c7e87a4 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -638,6 +638,26 @@ impl> BoundedVec { Err(element) } } + + /// Exactly the same semantics as [`Vec::rotate_left`], but returns an `Err` (and is a noop) if `mid` is larger then the current length. + pub fn try_rotate_left(&mut self, mid: usize) -> Result<(), ()> { + if mid > self.len() { + return Err(()) + } + + self.0.rotate_left(mid); + Ok(()) + } + + /// Exactly the same semantics as [`Vec::rotate_right`], but returns an `Err` (and is a noop) if `mid` is larger then the current length. + pub fn try_rotate_right(&mut self, mid: usize) -> Result<(), ()> { + if mid > self.len() { + return Err(()) + } + + self.0.rotate_right(mid); + Ok(()) + } } impl BoundedVec { @@ -1297,6 +1317,44 @@ mod test { assert!(bound == &unbound[..]); } + #[test] + fn bounded_vec_try_rotate_left_works() { + let o = BoundedVec::>::truncate_from(vec![1, 2, 3]); + let mut bound = o.clone(); + + bound.try_rotate_left(0).unwrap(); + assert_eq!(bound, o); + bound.try_rotate_left(3).unwrap(); + assert_eq!(bound, o); + + bound.try_rotate_left(4).unwrap_err(); + assert_eq!(bound, o); + + bound.try_rotate_left(1).unwrap(); + assert_eq!(bound, vec![2, 3, 1]); + bound.try_rotate_left(2).unwrap(); + assert_eq!(bound, o); + } + + #[test] + fn bounded_vec_try_rotate_right_works() { + let o = BoundedVec::>::truncate_from(vec![1, 2, 3]); + let mut bound = o.clone(); + + bound.try_rotate_right(0).unwrap(); + assert_eq!(bound, o); + bound.try_rotate_right(3).unwrap(); + assert_eq!(bound, o); + + bound.try_rotate_right(4).unwrap_err(); + assert_eq!(bound, o); + + bound.try_rotate_right(1).unwrap(); + assert_eq!(bound, vec![3, 1, 2]); + bound.try_rotate_right(2).unwrap(); + assert_eq!(bound, o); + } + // Just a test that structs 
containing `BoundedVec` and `BoundedSlice` can derive `Hash`. (This was broken when // they were deriving `Hash`). #[test] From 4da96aef81d9b29552f299685f627dbd09a7551d Mon Sep 17 00:00:00 2001 From: dzmitry-lahoda Date: Tue, 5 Dec 2023 22:35:43 +0000 Subject: [PATCH 334/359] improvement(primitive-types): better json-schema, allow to build for serde_no_std/json-schema feature/targets combinations (#801) * more std * fixes of std and improvement of schema * better alloc support --- primitive-types/Cargo.toml | 2 + primitive-types/src/json_schema.rs | 75 ++++++++++++++++++++++++++++++ primitive-types/src/lib.rs | 23 +++------ 3 files changed, 83 insertions(+), 17 deletions(-) create mode 100644 primitive-types/src/json_schema.rs diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 126b17dca..1c861d36b 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -20,6 +20,8 @@ schemars = { version = ">=0.8.12", default-features = true, optional = true } [dev-dependencies] num-traits = "0.2" +serde_json = { version = "1.0", default-features = false } +jsonschema = { version = "0.17", default-features = false } [features] default = ["std"] diff --git a/primitive-types/src/json_schema.rs b/primitive-types/src/json_schema.rs new file mode 100644 index 000000000..948bf86c3 --- /dev/null +++ b/primitive-types/src/json_schema.rs @@ -0,0 +1,75 @@ +use super::*; +#[cfg(not(feature = "std"))] +use alloc::{ + borrow::ToOwned, + string::{String, ToString}, +}; + +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; + +impl JsonSchema for H160 { + fn schema_name() -> String { + "HexEncoded20Bytes".to_owned() + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = gen.subschema_for::().into_object(); + schema.metadata().description = Some("Hex encoded 20 bytes".to_string()); + schema.string().pattern = Some("^0(x|X)[a-fA-F0-9]{40}$".to_string()); + schema.into() + } +} + +impl JsonSchema for U256 { + fn 
schema_name() -> String { + "U256String".to_string() + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = gen.subschema_for::().into_object(); + schema.metadata().description = Some("256-bit Unsigned Integer".to_string()); + schema.string().pattern = Some("^(0|[1-9][0-9]{0,77})$".to_string()); + schema.into() + } +} + +#[cfg(test)] +#[cfg(any(feature = "serde", feature = "serde_no_std"))] +mod tests { + use crate::{H160, U256}; + #[cfg(not(feature = "std"))] + use alloc::string::String; + use jsonschema::Draft; + use schemars::JsonSchema; + + #[test] + fn hex_encoded_20_bytes() { + let schema = H160::json_schema(&mut schemars::gen::SchemaGenerator::default()); + let schema_json = serde_json::to_value(&schema).unwrap(); + let schema = jsonschema::JSONSchema::options() + .with_draft(Draft::Draft7) + .compile(&schema_json) + .unwrap(); + let value = serde_json::to_value("0x55086adeca661185c437d92b9818e6eda6d0d047").unwrap(); + assert!(schema.validate(&value).is_ok()); + let value = serde_json::to_value("0X0E9C8DA9FD4BDD3281879D9E328D8D74D02558CC").unwrap(); + assert!(schema.validate(&value).is_ok()); + + let value = serde_json::to_value("42").unwrap(); + assert!(schema.validate(&value).is_err()); + } + + #[test] + fn u256() { + let schema = U256::json_schema(&mut schemars::gen::SchemaGenerator::default()); + let schema_json = serde_json::to_value(&schema).unwrap(); + let schema = jsonschema::JSONSchema::options() + .with_draft(Draft::Draft7) + .compile(&schema_json) + .unwrap(); + let addr = serde_json::to_value("42").unwrap(); + assert!(schema.validate(&addr).is_ok()); + let addr = serde_json::to_value(['1'; 79].into_iter().collect::()).unwrap(); + assert!(schema.validate(&addr).is_err()); + } +} diff --git a/primitive-types/src/lib.rs b/primitive-types/src/lib.rs index d80a312f8..41b740298 100644 --- a/primitive-types/src/lib.rs +++ b/primitive-types/src/lib.rs @@ -14,8 +14,14 @@ #![cfg_attr(not(feature = "std"), no_std)] +// serde_no_std 
leads to alloc via impl, json-schema without std requires alloc +#[cfg(all(not(feature = "std"), any(feature = "serde_no_std", feature = "json-schema")))] +extern crate alloc; + #[cfg(feature = "fp-conversion")] mod fp_conversion; +#[cfg(feature = "json-schema")] +mod json_schema; use core::convert::TryFrom; use fixed_hash::{construct_fixed_hash, impl_fixed_hash_conversions}; @@ -105,23 +111,6 @@ mod serde { impl_fixed_hash_serde!(H768, 96); } -// true that no need std, but need to do no_std alloc than, so simplified for now -// also no macro, but easy to create -#[cfg(all(feature = "std", feature = "json-schema"))] -mod json_schema { - use super::*; - - impl schemars::JsonSchema for H160 { - fn schema_name() -> String { - "0xPrefixedHexString".to_string() - } - - fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { - String::json_schema(gen) - } - } -} - #[cfg(feature = "impl-codec")] mod codec { use super::*; From 8384f5f47eb31b60ed18af126751b6263fc56e1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Dec 2023 23:36:16 +0100 Subject: [PATCH 335/359] build(deps): bump Swatinem/rust-cache from 2.7.0 to 2.7.1 (#795) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.0 to 2.7.1. - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/a95ba195448af2da9b00fb742d14ffaaf3c21f43...3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e89577690..3e42894be 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 - run: rustup target add wasm32-unknown-unknown - run: rustup target add mips64-unknown-linux-muslabi64 @@ -137,7 +137,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0 + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 - uses: actions-rs/cargo@v1 with: From 872345513a952ab327fa1fb25d14e509c725755b Mon Sep 17 00:00:00 2001 From: Farhad Shabani Date: Tue, 5 Dec 2023 14:46:46 -0800 Subject: [PATCH 336/359] imp: in `fixed-hash` allow opting out `rand` when `std` enabled (#804) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * imp: fixed-hash allow opting out rand when std enabled * Update fixed-hash/Cargo.toml * fix: cargo test --no-default-features * Update fixed-hash/Cargo.toml Co-authored-by: Bastian Köcher --------- Co-authored-by: Bastian Köcher Co-authored-by: ordian --- fixed-hash/Cargo.toml | 4 ++-- primitive-types/Cargo.toml | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index 337dad706..b1023533b 100644 --- a/fixed-hash/Cargo.toml +++ 
b/fixed-hash/Cargo.toml @@ -9,7 +9,7 @@ description = "Macros to define custom fixed-size hash types" documentation = "https://docs.rs/fixed-hash/" readme = "README.md" edition = "2021" -rust-version = "1.56.1" +rust-version = "1.60" [package.metadata.docs.rs] features = ["quickcheck", "api-dummy"] @@ -29,7 +29,7 @@ rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } [features] default = ["std", "rand", "rustc-hex", "byteorder"] -std = ["rustc-hex/std", "rand/std", "byteorder/std"] +std = ["rustc-hex/std", "rand?/std", "byteorder/std"] api-dummy = [] # Feature used by docs.rs to display documentation of hash types diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 1c861d36b..d7d93c4fb 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -24,8 +24,9 @@ serde_json = { version = "1.0", default-features = false } jsonschema = { version = "0.17", default-features = false } [features] -default = ["std"] +default = ["std", "rand"] std = ["uint/std", "fixed-hash/std", "impl-codec?/std"] +rand = ["fixed-hash/rand"] byteorder = ["fixed-hash/byteorder"] rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde", "impl-serde/std"] From 422471a88bdb0a712261dbe9ee5f397ca38ce560 Mon Sep 17 00:00:00 2001 From: ordian Date: Tue, 2 Jan 2024 11:27:33 +0100 Subject: [PATCH 337/359] Try fixing CI (#817) `mips64-unknown-linux-muslabi64 target` (used to test bigendian) was removed in 1.75.0. The workaround for now is to pin the toolchain to 1.74.0. We could try using `cross` instead in the future. 
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e42894be..d61b204d0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,7 +39,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable + toolchain: 1.74.0 override: true - name: Rust Cache From 31e7719af56603d1cfe97ff3a25da22ae3c94211 Mon Sep 17 00:00:00 2001 From: Fabian-Gruenbichler Date: Tue, 2 Jan 2024 11:50:00 +0100 Subject: [PATCH 338/359] rlp: tests: fix 32-bit overflow (#808) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit the overflow behaviour depends on the width of usize, conditionalize accordingly. Fixes: #802 Signed-off-by: Fabian Grünbichler Co-authored-by: ordian --- rlp/tests/tests.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 203397393..768de3dd2 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -22,7 +22,10 @@ fn test_rlp_display() { #[test] fn length_overflow() { + #[cfg(target_pointer_width = "64")] let bs = hex!("bfffffffffffffffffffffffe5"); + #[cfg(target_pointer_width = "32")] + let bs = hex!("bbffffffffffffffe5"); let rlp = Rlp::new(&bs); let res: Result = rlp.as_val(); assert_eq!(Err(DecoderError::RlpInvalidLength), res); @@ -593,7 +596,10 @@ fn test_rlp_nested_empty_list_encode() { #[test] fn test_rlp_list_length_overflow() { + #[cfg(target_pointer_width = "64")] let data = hex!("ffffffffffffffffff000000"); + #[cfg(target_pointer_width = "32")] + let data = hex!("fbffffffff000000"); let rlp = Rlp::new(&data); let as_val: Result = rlp.val_at(0); assert_eq!(Err(DecoderError::RlpIsTooShort), as_val); From 2241f644e02e6e91072a7087eb056ffb0085c711 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 18:55:48 +0100 Subject: [PATCH 339/359] build(deps): bump 
Swatinem/rust-cache from 2.7.1 to 2.7.2 (#823) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.1 to 2.7.2. - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8...a22603398250b864f7190077025cf752307154dc) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d61b204d0..7750f8774 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + uses: Swatinem/rust-cache@a22603398250b864f7190077025cf752307154dc # v2.7.2 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + uses: Swatinem/rust-cache@a22603398250b864f7190077025cf752307154dc # v2.7.2 - run: rustup target add wasm32-unknown-unknown - run: rustup target add mips64-unknown-linux-muslabi64 @@ -137,7 +137,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + uses: Swatinem/rust-cache@a22603398250b864f7190077025cf752307154dc # v2.7.2 - uses: actions-rs/cargo@v1 with: From 314bda627a590de8e43a4611e6e8a3a33cc3f1a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 09:15:46 +0100 Subject: [PATCH 340/359] build(deps): bump Swatinem/rust-cache 
from 2.7.2 to 2.7.3 (#824) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.2 to 2.7.3. - [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/a22603398250b864f7190077025cf752307154dc...23bce251a8cd2ffc3c1075eaa2367cf899916d84) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7750f8774..b6e26ad82 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@a22603398250b864f7190077025cf752307154dc # v2.7.2 + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@a22603398250b864f7190077025cf752307154dc # v2.7.2 + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - run: rustup target add wasm32-unknown-unknown - run: rustup target add mips64-unknown-linux-muslabi64 @@ -137,7 +137,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@a22603398250b864f7190077025cf752307154dc # v2.7.2 + uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - uses: actions-rs/cargo@v1 with: From dbf46bada7e1e796899b24155fa0fc9b7d2f3ab6 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 29 Jan 2024 22:35:36 +0100 Subject: [PATCH 341/359] Release 0.2.0 (#825) * Release 0.2.0 Signed-off-by: Oliver Tale-Yazdi * CHANGELOG release date Signed-off-by: 
Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- bounded-collections/CHANGELOG.md | 2 +- bounded-collections/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 744f41c6d..101c91ec8 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,7 +4,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [0.2.0] - 2023-11-13 +## [0.2.0] - 2024-01-29 - Added `try_rotate_left` and `try_rotate_right` to `BoundedVec`. [#800](https://github.com/paritytech/parity-common/pull/800) ## [0.1.9] - 2023-10-10 diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index d74943398..f9496eb8e 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.1.9" +version = "0.2.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" From d5e9c1d5b52e7a43f843855a0f4fbc319929a681 Mon Sep 17 00:00:00 2001 From: rongyi Date: Mon, 5 Feb 2024 18:23:51 +0800 Subject: [PATCH 342/359] Add more comment (#827) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add more comment * Apply suggestions from code review Co-authored-by: Bastian Köcher --------- Co-authored-by: ordian Co-authored-by: Bastian Köcher --- rlp/src/stream.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rlp/src/stream.rs b/rlp/src/stream.rs index d1b7f9a8c..e7c2a99e4 100644 --- a/rlp/src/stream.rs +++ b/rlp/src/stream.rs @@ -186,6 +186,9 @@ impl RlpStream { _ => { // payload is longer than 1 byte only for lists > 55 bytes // by pushing always this 1 byte we may avoid unnecessary shift of data + // both cases will need at least 1 byte header, so we push 1 byte + // and then, when we know the exactly size of 
data, the value will be updated + // accordingly in `insert_list_payload` method. self.buffer.put_u8(0); let position = self.total_written(); From 757e2bdd02fd7515545b8fe52226ff36e3cee14e Mon Sep 17 00:00:00 2001 From: Luca Bruno Date: Sat, 10 Aug 2024 10:34:11 +0200 Subject: [PATCH 343/359] primitive-types: add repository URL to Cargo.toml (#844) This adds the repository URL to the Cargo manifest of `primitive-types`, so that the crate published on crates.io can be automatically linked back to the source repository. --- primitive-types/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index d7d93c4fb..6aa89d2aa 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -4,6 +4,7 @@ version = "0.12.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" +repository = "https://github.com/paritytech/parity-common" description = "Primitive types shared by Ethereum and Substrate" edition = "2021" rust-version = "1.60.0" From 9ff9316a53d03e14c267aa6dcc45c478bfb2b12a Mon Sep 17 00:00:00 2001 From: ordian Date: Sat, 10 Aug 2024 15:20:44 +0200 Subject: [PATCH 344/359] fix compile errors with new Rust (#854) * fix compile errors with new Rust * try fixing rug compilation error --- bounded-collections/src/bounded_btree_map.rs | 3 ++- bounded-collections/src/bounded_btree_set.rs | 3 ++- bounded-collections/src/bounded_vec.rs | 5 +++-- fixed-hash/src/hash.rs | 1 - uint/Cargo.toml | 5 ++++- 5 files changed, 11 insertions(+), 6 deletions(-) diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index de975801a..c3369c192 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ -655,10 +655,11 @@ mod test { #[test] #[cfg(feature = "std")] fn container_can_derive_hash() { - #[derive(Hash)] + #[derive(Hash, Default)] struct Foo 
{ bar: u8, map: BoundedBTreeMap>, } + let _foo = Foo::default(); } } diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index c966bce8a..e651c862e 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -579,11 +579,12 @@ mod test { #[test] #[cfg(feature = "std")] fn container_can_derive_hash() { - #[derive(Hash)] + #[derive(Hash, Default)] struct Foo { bar: u8, set: BoundedBTreeSet>, } + let _foo = Foo::default(); } #[cfg(feature = "serde")] diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index c8c7e87a4..4d56971eb 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -925,7 +925,7 @@ where #[cfg(all(test, feature = "std"))] mod test { use super::*; - use crate::{bounded_vec, ConstU32, ConstU8}; + use crate::{bounded_vec, ConstU32}; use codec::CompactLen; #[test] @@ -1363,8 +1363,9 @@ mod test { #[derive(Hash)] struct Foo<'a> { bar: u8, - slice: BoundedSlice<'a, usize, ConstU8<8>>, + slice: BoundedSlice<'a, usize, ConstU32<4>>, map: BoundedVec>, } + let _foo = Foo { bar: 42, slice: BoundedSlice::truncate_from(&[0, 1][..]), map: BoundedVec::default() }; } } diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index 20f3864ef..a5c1ed41c 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -255,7 +255,6 @@ macro_rules! 
construct_fixed_hash { impl $crate::core_::marker::Copy for $name {} - #[cfg_attr(feature = "dev", allow(expl_impl_clone_on_copy))] impl $crate::core_::clone::Clone for $name { fn clone(&self) -> $name { *self diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 36f3de1b2..44bef00d6 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -34,7 +34,10 @@ criterion = "0.5.1" num-bigint = "0.4.0" [target.'cfg(all(unix, target_arch = "x86_64"))'.dev-dependencies] -rug = { version = "1.6.0", default-features = false, features = ["integer"] } +rug = { version = "1.6.0", default-features = false, features = [ + "integer", + "std", +] } [[bench]] name = "bigint" From 2f1866d2e1b6d1a09b82396404c9e9e5c79fbae4 Mon Sep 17 00:00:00 2001 From: Jun Jiang Date: Sat, 10 Aug 2024 09:29:53 -0400 Subject: [PATCH 345/359] Upgrade rocksdb to 0.22 (#853) Co-authored-by: ordian --- Cargo.toml | 1 + ethereum-types/Cargo.toml | 1 + kvdb-rocksdb/Cargo.toml | 8 ++++---- kvdb-rocksdb/examples/memtest.rs | 2 +- kvdb-rocksdb/src/lib.rs | 2 +- primitive-types/impls/serde/Cargo.toml | 2 +- 6 files changed, 9 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c2a4432d3..2608dd2a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" members = [ "fixed-hash", "keccak-hash", diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index b5a5302ec..67d6fe6d9 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -29,3 +29,4 @@ arbitrary = ["ethbloom/arbitrary", "fixed-hash/arbitrary", "uint-crate/arbitrary rlp = ["impl-rlp", "ethbloom/rlp", "primitive-types/rlp"] codec = ["impl-codec", "ethbloom/codec", "scale-info", "primitive-types/scale-info"] num-traits = ["primitive-types/num-traits"] +rand = ["primitive-types/rand"] diff --git a/kvdb-rocksdb/Cargo.toml b/kvdb-rocksdb/Cargo.toml index 257f45c9c..b866b1344 100644 --- a/kvdb-rocksdb/Cargo.toml +++ b/kvdb-rocksdb/Cargo.toml @@ -24,21 +24,21 @@ regex = "1.3.1" 
[target.'cfg(any(target_os = "openbsd", target_env = "msvc"))'.dependencies.rocksdb] default-features = false features = ["snappy"] -version = "0.21" +version = "0.22.0" [target.'cfg(not(any(target_os = "openbsd", target_env = "msvc")))'.dependencies.rocksdb] default-features = false features = ["snappy", "jemalloc"] -version = "0.21" +version = "0.22.0" [dev-dependencies] alloc_counter = "0.0.4" criterion = "0.5" -ethereum-types = { path = "../ethereum-types" } +ethereum-types = { path = "../ethereum-types", features = ["rand"] } kvdb-shared-tests = { path = "../kvdb-shared-tests", version = "0.11" } rand = "0.8.0" tempfile = "3.1.0" keccak-hash = { path = "../keccak-hash" } -sysinfo = "0.29.0" +sysinfo = "0.30.13" ctrlc = "3.1.4" chrono = "0.4" diff --git a/kvdb-rocksdb/examples/memtest.rs b/kvdb-rocksdb/examples/memtest.rs index e41521bd5..f60ea50ad 100644 --- a/kvdb-rocksdb/examples/memtest.rs +++ b/kvdb-rocksdb/examples/memtest.rs @@ -24,7 +24,7 @@ use std::sync::{ atomic::{AtomicBool, Ordering as AtomicOrdering}, Arc, }; -use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; +use sysinfo::{get_current_pid, System}; const COLUMN_COUNT: u32 = 100; diff --git a/kvdb-rocksdb/src/lib.rs b/kvdb-rocksdb/src/lib.rs index 0af25f6b9..711468487 100644 --- a/kvdb-rocksdb/src/lib.rs +++ b/kvdb-rocksdb/src/lib.rs @@ -905,7 +905,7 @@ rocksdb.db.get.micros P50 : 2.000000 P95 : 3.000000 P99 : 4.000000 P100 : 5.0000 assert!(settings.contains(" block_size: 323232")); // LRU cache (default column) - assert!(settings.contains("block_cache_options:\n capacity : 8388608")); + assert!(settings.contains("block_cache_options:\n capacity : 115343360")); // LRU cache for non-default columns is ⅓ of memory budget (including default column) let lru_size = (330 * MB) / 3; let needle = format!("block_cache_options:\n capacity : {}", lru_size); diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index b572a2806..3a3ca2679 100644 --- 
a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -16,7 +16,7 @@ std = ["serde/std"] serde = { version = "1.0.101", default-features = false, features = ["alloc"] } [dev-dependencies] -criterion = "0.3.0" +criterion = "0.5.1" serde_derive = "1.0.101" serde_json = "1.0.41" uint = { version = "0.9.5", path = "../../../uint" } From 701148e9afc7aa03a0eb2b66ce0309875810a1c3 Mon Sep 17 00:00:00 2001 From: Jun Jiang Date: Sat, 10 Aug 2024 14:05:00 -0400 Subject: [PATCH 346/359] Update syn to 2 (#855) --- rlp-derive/Cargo.toml | 2 +- rlp-derive/src/de.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index a1785d7b7..5ca3af4ec 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -12,7 +12,7 @@ rust-version = "1.56.1" proc-macro = true [dependencies] -syn = "1.0.14" +syn = "2.0.72" quote = "1.0.2" proc-macro2 = "1.0.8" diff --git a/rlp-derive/src/de.rs b/rlp-derive/src/de.rs index 87f5e3a81..f3ec6f178 100644 --- a/rlp-derive/src/de.rs +++ b/rlp-derive/src/de.rs @@ -121,7 +121,7 @@ fn decodable_field( let list = quotes.list; let attributes = &field.attrs; - let default = if let Some(attr) = attributes.iter().find(|attr| attr.path.is_ident("rlp")) { + let default = if let Some(attr) = attributes.iter().find(|attr| attr.path().is_ident("rlp")) { if *default_attribute_encountered { panic!("only 1 #[rlp(default)] attribute is allowed in a struct") } From 0db43ee6a258f02894bbc873be08126331691012 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 10 Sep 2024 12:17:05 +0200 Subject: [PATCH 347/359] Remove From [u8; n] impl for uint types (#859) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update uint and deps * Fix uppercase * Apply suggestions from code review Co-authored-by: Alexander Theißen * Update changelogs * Apply suggestions from code review * Apply suggestions from code review --------- Co-authored-by: 
Alexander Theißen Co-authored-by: ordian --- ethereum-types/CHANGELOG.md | 2 + ethereum-types/src/hash.rs | 4 +- ethereum-types/src/uint.rs | 4 +- primitive-types/CHANGELOG.md | 2 + primitive-types/impls/codec/src/lib.rs | 3 +- primitive-types/impls/rlp/src/lib.rs | 5 +-- primitive-types/impls/serde/CHANGELOG.md | 4 ++ primitive-types/impls/serde/src/lib.rs | 5 +-- rlp/tests/tests.rs | 4 +- uint/CHANGELOG.md | 2 + uint/benches/bigint.rs | 7 ++- uint/src/uint.rs | 55 +++++++++--------------- uint/tests/uint_tests.rs | 55 ++++++++++++------------ 13 files changed, 72 insertions(+), 80 deletions(-) diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 1fc54e5e6..19a728f25 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) ## [0.14.1] - 2022-11-29 - Added `if_ethbloom` conditional macro. [#682](https://github.com/paritytech/parity-common/pull/682) diff --git a/ethereum-types/src/hash.rs b/ethereum-types/src/hash.rs index 82372ee64..070124268 100644 --- a/ethereum-types/src/hash.rs +++ b/ethereum-types/src/hash.rs @@ -74,12 +74,12 @@ macro_rules! 
impl_uint_conversions { fn from_uint(value: &$uint) -> Self { let mut ret = $hash::zero(); - value.to_big_endian(ret.as_bytes_mut()); + value.write_as_big_endian(ret.as_bytes_mut()); ret } fn into_uint(&self) -> $uint { - $uint::from(self.as_ref() as &[u8]) + $uint::from_big_endian(self.as_ref() as &[u8]) } } }; diff --git a/ethereum-types/src/uint.rs b/ethereum-types/src/uint.rs index 7b9b8f07e..5dfbdb310 100644 --- a/ethereum-types/src/uint.rs +++ b/ethereum-types/src/uint.rs @@ -102,8 +102,8 @@ mod tests { #[test] fn fixed_arrays_roundtrip() { let raw: U256 = "7094875209347850239487502394881".into(); - let array: [u8; 32] = raw.into(); - let new_raw = array.into(); + let array: [u8; 32] = raw.to_big_endian(); + let new_raw = U256::from_big_endian(&array); assert_eq!(raw, new_raw); } diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 5545d482d..485138489 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,6 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) ## [0.12.2] - 2023-10-10 - Added `schemars` support via `json-schema` feature. [#785](https://github.com/paritytech/parity-common/pull/785) diff --git a/primitive-types/impls/codec/src/lib.rs b/primitive-types/impls/codec/src/lib.rs index feacec08a..905d17d12 100644 --- a/primitive-types/impls/codec/src/lib.rs +++ b/primitive-types/impls/codec/src/lib.rs @@ -19,8 +19,7 @@ macro_rules! 
impl_uint_codec { ($name: ident, $len: expr) => { impl $crate::codec::Encode for $name { fn using_encoded R>(&self, f: F) -> R { - let mut bytes = [0u8; $len * 8]; - self.to_little_endian(&mut bytes); + let bytes = self.to_little_endian(); bytes.using_encoded(f) } } diff --git a/primitive-types/impls/rlp/src/lib.rs b/primitive-types/impls/rlp/src/lib.rs index 71382a303..9b17ea4f7 100644 --- a/primitive-types/impls/rlp/src/lib.rs +++ b/primitive-types/impls/rlp/src/lib.rs @@ -23,8 +23,7 @@ macro_rules! impl_uint_rlp { impl $crate::rlp::Encodable for $name { fn rlp_append(&self, s: &mut $crate::rlp::RlpStream) { let leading_empty_bytes = $size * 8 - (self.bits() + 7) / 8; - let mut buffer = [0u8; $size * 8]; - self.to_big_endian(&mut buffer); + let buffer = self.to_big_endian(); s.encoder().encode_value(&buffer[leading_empty_bytes..]); } } @@ -35,7 +34,7 @@ macro_rules! impl_uint_rlp { if !bytes.is_empty() && bytes[0] == 0 { Err($crate::rlp::DecoderError::RlpInvalidIndirection) } else if bytes.len() <= $size * 8 { - Ok($name::from(bytes)) + Ok($name::from_big_endian(bytes)) } else { Err($crate::rlp::DecoderError::RlpIsTooBig) } diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index 0757363ea..623185f63 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -4,6 +4,10 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [Unreleased] +### Breaking +- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) + ## [0.4.0] - 2022-09-02 - Support deserializing H256 et al from bytes or sequences of bytes, too. [#668](https://github.com/paritytech/parity-common/pull/668) - Support deserializing H256 et al from newtype structs containing anything compatible, too. 
[#672](https://github.com/paritytech/parity-common/pull/672) diff --git a/primitive-types/impls/serde/src/lib.rs b/primitive-types/impls/serde/src/lib.rs index 63fe535cb..50587a91d 100644 --- a/primitive-types/impls/serde/src/lib.rs +++ b/primitive-types/impls/serde/src/lib.rs @@ -32,8 +32,7 @@ macro_rules! impl_uint_serde { S: $crate::serde::Serializer, { let mut slice = [0u8; 2 + 2 * $len * 8]; - let mut bytes = [0u8; $len * 8]; - self.to_big_endian(&mut bytes); + let bytes = self.to_big_endian(); $crate::serialize::serialize_uint(&mut slice, &bytes, serializer) } } @@ -48,7 +47,7 @@ macro_rules! impl_uint_serde { deserializer, $crate::serialize::ExpectedLen::Between(0, &mut bytes), )?; - Ok(bytes[0..wrote].into()) + Ok(Self::from_big_endian(&bytes[0..wrote])) } } }; diff --git a/rlp/tests/tests.rs b/rlp/tests/tests.rs index 768de3dd2..c5224fd5d 100644 --- a/rlp/tests/tests.rs +++ b/rlp/tests/tests.rs @@ -226,7 +226,7 @@ fn encode_u256() { ETestPair::from((U256::from(0x0100_0000_u64), hex!("8401000000"))), ETestPair::from((U256::from(0xffff_ffff_u64), hex!("84ffffffff"))), ETestPair::from(( - hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").into(), + U256::from_big_endian(&hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0")), hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), )), ]; @@ -482,7 +482,7 @@ fn decode_untrusted_u256() { DTestPair::from((U256::from(0x0100_0000_u64), hex!("8401000000"))), DTestPair::from((U256::from(0xffff_ffff_u64), hex!("84ffffffff"))), DTestPair::from(( - hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0").into(), + U256::from_big_endian(&hex!(" 8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0")), hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"), )), ]; diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 103758218..78474c709 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,6 +5,8 @@ 
The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Breaking +- Removed From<[u8; n]> conversions, renamed `to_big_endian` / `to_little_endian` to write_as_*, and made them return byte arrays. [#859](https://github.com/paritytech/parity-common/pull/859) ## [0.9.5] - 2022-11-29 - Implemented bitwise assign traits. [#690](https://github.com/paritytech/parity-common/pull/690) diff --git a/uint/benches/bigint.rs b/uint/benches/bigint.rs index c092dfc5f..d338ccb43 100644 --- a/uint/benches/bigint.rs +++ b/uint/benches/bigint.rs @@ -78,8 +78,7 @@ criterion_group!( criterion_main!(bigint); fn to_biguint(x: U256) -> BigUint { - let mut bytes = [0u8; 32]; - x.to_little_endian(&mut bytes); + let bytes = x.to_little_endian(); BigUint::from_bytes_le(&bytes) } @@ -662,8 +661,8 @@ fn from_fixed_array(c: &mut Criterion) { [255, 0, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 0, 0, 0, 0, 0, 213, 0, 0, 0, 0, 0, 0]; c.bench_function("from_fixed_array", move |b| { b.iter(|| { - let _: U512 = black_box(black_box(ary512).into()); - let _: U256 = black_box(black_box(ary256).into()); + let _: U512 = black_box(U512::from_big_endian(black_box(&ary512))); + let _: U256 = black_box(U256::from_big_endian(black_box(&ary256))); }) }); } diff --git a/uint/src/uint.rs b/uint/src/uint.rs index bc4b7416c..3482aa6db 100644 --- a/uint/src/uint.rs +++ b/uint/src/uint.rs @@ -743,9 +743,17 @@ macro_rules! construct_uint { (arr[index / 8] >> (((index % 8)) * 8)) as u8 } + /// Convert to big-endian bytes. + #[inline] + pub fn to_big_endian(&self) -> [u8; $n_words * 8] { + let mut bytes = [0u8; $n_words * 8]; + self.write_as_big_endian(&mut bytes); + bytes + } + /// Write to the slice in big-endian format. 
#[inline] - pub fn to_big_endian(&self, bytes: &mut [u8]) { + pub fn write_as_big_endian(&self, bytes: &mut [u8]) { use $crate::byteorder::{ByteOrder, BigEndian}; debug_assert!($n_words * 8 == bytes.len()); for i in 0..$n_words { @@ -753,9 +761,16 @@ macro_rules! construct_uint { } } - /// Write to the slice in little-endian format. + /// Convert to little-endian bytes. #[inline] - pub fn to_little_endian(&self, bytes: &mut [u8]) { + pub fn to_little_endian(&self) -> [u8; $n_words * 8] { + let mut bytes = [0u8; $n_words * 8]; + self.write_as_little_endian(&mut bytes); + bytes + } + + #[inline] + pub fn write_as_little_endian(&self, bytes: &mut [u8]) { use $crate::byteorder::{ByteOrder, LittleEndian}; debug_assert!($n_words * 8 == bytes.len()); for i in 0..$n_words { @@ -1307,26 +1322,6 @@ macro_rules! construct_uint { } } - impl $crate::core_::convert::From<$name> for [u8; $n_words * 8] { - fn from(number: $name) -> Self { - let mut arr = [0u8; $n_words * 8]; - number.to_big_endian(&mut arr); - arr - } - } - - impl $crate::core_::convert::From<[u8; $n_words * 8]> for $name { - fn from(bytes: [u8; $n_words * 8]) -> Self { - Self::from(&bytes) - } - } - - impl<'a> $crate::core_::convert::From<&'a [u8; $n_words * 8]> for $name { - fn from(bytes: &[u8; $n_words * 8]) -> Self { - Self::from(&bytes[..]) - } - } - impl $crate::core_::default::Default for $name { fn default() -> Self { $name::zero() @@ -1360,13 +1355,6 @@ macro_rules! construct_uint { $crate::impl_map_from!($name, i32, i64); $crate::impl_map_from!($name, isize, i64); - // Converts from big endian representation. - impl<'a> $crate::core_::convert::From<&'a [u8]> for $name { - fn from(bytes: &[u8]) -> $name { - Self::from_big_endian(bytes) - } - } - $crate::impl_try_from_for_primitive!($name, u8); $crate::impl_try_from_for_primitive!($name, u16); $crate::impl_try_from_for_primitive!($name, u32); @@ -1736,8 +1724,7 @@ macro_rules! 
construct_uint { $crate::hex::decode_to_slice(encoded, out).map_err(Self::Err::from)?; } - let bytes_ref: &[u8] = &bytes; - Ok(From::from(bytes_ref)) + Ok(Self::from_big_endian(&bytes)) } } @@ -1787,7 +1774,7 @@ macro_rules! impl_quickcheck_arbitrary_for_uint { } }); - res.as_ref().into() + Self::from_big_endian(res.as_ref()) } } }; @@ -1809,7 +1796,7 @@ macro_rules! impl_arbitrary_for_uint { fn arbitrary(u: &mut $crate::arbitrary::Unstructured<'_>) -> $crate::arbitrary::Result { let mut res = [0u8; $n_bytes]; u.fill_buffer(&mut res)?; - Ok(Self::from(res)) + Ok(Self::from_big_endian(&res)) } } }; diff --git a/uint/tests/uint_tests.rs b/uint/tests/uint_tests.rs index a830e488e..61c03c86d 100644 --- a/uint/tests/uint_tests.rs +++ b/uint/tests/uint_tests.rs @@ -175,18 +175,18 @@ fn uint256_from() { assert_eq!(e, ud); // test initialization from bytes - let va = U256::from(&[10u8][..]); + let va = U256::from_big_endian(&[10u8][..]); assert_eq!(e, va); // more tests for initialization from bytes - assert_eq!(U256([0x1010, 0, 0, 0]), U256::from(&[0x10u8, 0x10][..])); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0, 0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from(&[0, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); - assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from(&[1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x1010, 0, 0, 0]), U256::from_big_endian(&[0x10u8, 0x10][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_big_endian(&[0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_big_endian(&[0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 0, 0, 0]), U256::from_big_endian(&[0, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); + assert_eq!(U256([0x12f0, 1, 0, 0]), U256::from_big_endian(&[1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0][..])); assert_eq!( U256([0x12f0, 1, 0x0910203040506077, 0x8090a0b0c0d0e0f0]), - U256::from( + U256::from_big_endian( &[ 0x80, 0x90, 
0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0x09, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x77, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0x12u8, 0xf0 @@ -195,7 +195,7 @@ fn uint256_from() { ); assert_eq!( U256([0x00192437100019fa, 0x243710, 0, 0]), - U256::from(&[0x24u8, 0x37, 0x10, 0, 0x19, 0x24, 0x37, 0x10, 0, 0x19, 0xfa][..]) + U256::from_big_endian(&[0x24u8, 0x37, 0x10, 0, 0x19, 0x24, 0x37, 0x10, 0, 0x19, 0xfa][..]) ); // test initializtion from string @@ -273,9 +273,8 @@ fn uint256_try_into_primitives() { fn uint256_to() { let hex = "8090a0b0c0d0e0f00910203040506077583a2cf8264910e1436bda32571012f0"; let uint = U256::from_str(hex).unwrap(); - let mut bytes = [0u8; 32]; - uint.to_big_endian(&mut bytes); - let uint2 = U256::from(&bytes[..]); + let bytes = uint.to_big_endian(); + let uint2 = U256::from_big_endian(&bytes[..]); assert_eq!(uint, uint2); } @@ -893,7 +892,7 @@ fn big_endian() { assert_eq!(source, U256::from(1)); - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( vec![ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, @@ -905,7 +904,7 @@ fn big_endian() { let source = U256([512, 0, 0, 0]); let mut target = vec![0u8; 32]; - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( vec![ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, @@ -917,7 +916,7 @@ fn big_endian() { let source = U256([0, 512, 0, 0]); let mut target = vec![0u8; 32]; - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( vec![ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, @@ -927,7 +926,7 @@ fn big_endian() { ); let source = U256::from_str("0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20").unwrap(); - source.to_big_endian(&mut target); + source.write_as_big_endian(&mut target); assert_eq!( vec![ 0x01, 
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, @@ -1008,7 +1007,7 @@ fn little_endian() { 0xbb, 0xc5, 0x3c, 0x7d, 0x2b, 0x72, 0xe5, 0xf6, 0xa3, 0x1d, 0xca, 0x2c, 0x02, 0x00, ]; let mut result = [0u8; 32]; - number.to_little_endian(&mut result); + number.write_as_little_endian(&mut result); assert_eq!(expected, result); } @@ -1019,11 +1018,11 @@ fn slice_roundtrip() { 107, 109, 113, 127, ]; - let u256: U256 = (&raw[..]).into(); + let u256 = U256::from_big_endian(&raw[..]); let mut new_raw = [0u8; 32]; - u256.to_big_endian(&mut new_raw); + u256.write_as_big_endian(&mut new_raw); assert_eq!(&raw, &new_raw); } @@ -1039,7 +1038,7 @@ fn slice_roundtrip_le() { let mut new_raw = [0u8; 32]; - u256.to_little_endian(&mut new_raw); + u256.write_as_little_endian(&mut new_raw); assert_eq!(&raw, &new_raw); } @@ -1055,7 +1054,7 @@ fn slice_roundtrip_le2() { let mut new_raw = [0u8; 32]; - u256.to_little_endian(&mut new_raw); + u256.write_as_little_endian(&mut new_raw); assert_eq!(&raw, &new_raw[..31]); } @@ -1090,17 +1089,17 @@ fn from_big_endian() { fn into_fixed_array() { let expected: [u8; 32] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; - let ary: [u8; 32] = U256::from(1).into(); + let ary: [u8; 32] = U256::from(1).to_big_endian(); assert_eq!(ary, expected); } #[test] fn test_u256_from_fixed_array() { let ary = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 123]; - let num: U256 = ary.into(); + let num = U256::from_big_endian(&ary); assert_eq!(num, U256::from(core::u64::MAX) + 1 + 123); - let a_ref: &U256 = &ary.into(); + let a_ref = &U256::from_big_endian(&ary); assert_eq!(a_ref, &(U256::from(core::u64::MAX) + 1 + 123)); } @@ -1108,7 +1107,7 @@ fn test_u256_from_fixed_array() { fn test_from_ref_to_fixed_array() { let ary: &[u8; 32] = &[1, 0, 1, 2, 1, 0, 1, 2, 3, 0, 3, 4, 3, 0, 3, 4, 5, 0, 5, 6, 5, 0, 5, 6, 7, 0, 7, 8, 7, 
0, 7, 8]; - let big: U256 = ary.into(); + let big = U256::from_big_endian(ary); // the numbers are each row of 8 bytes reversed and cast to u64 assert_eq!(big, U256([504410889324070664, 360293493601469702, 216176097878868740, 72058702156267778u64])); } @@ -1119,11 +1118,11 @@ fn test_u512_from_fixed_array() { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 123, ]; - let num: U512 = ary.into(); + let num = U512::from_big_endian(&ary); assert_eq!(num, U512::from(123)); - let a_ref: &U512 = &ary.into(); - assert_eq!(a_ref, &U512::from(123)); + let a_ref = U512::from_big_endian(&ary); + assert_eq!(a_ref, U512::from(123)); } #[test] @@ -1138,7 +1137,7 @@ fn leading_zeros() { fn issue_507_roundtrip() { let mut b32 = <[u8; 32]>::default(); let a = U256::from(10); - a.to_little_endian(&mut b32); + a.write_as_little_endian(&mut b32); let b = U256::from_little_endian(&b32[..]); assert_eq!(a, b); } From 63c5afbf8e0e903439d3c127c59b0e4a7991000b Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Wed, 11 Sep 2024 11:41:35 +0200 Subject: [PATCH 348/359] Bump versions (#860) * Bump versions * Update rlp changelog * adjust the date --------- Co-authored-by: ordian --- ethbloom/CHANGELOG.md | 3 +++ ethbloom/Cargo.toml | 6 +++--- ethereum-types/CHANGELOG.md | 3 ++- ethereum-types/Cargo.toml | 12 ++++++------ keccak-hash/CHANGELOG.md | 5 ++++- keccak-hash/Cargo.toml | 4 ++-- primitive-types/CHANGELOG.md | 3 ++- primitive-types/Cargo.toml | 10 +++++----- primitive-types/impls/num-traits/CHANGELOG.md | 3 +++ primitive-types/impls/num-traits/Cargo.toml | 4 ++-- primitive-types/impls/rlp/CHANGELOG.md | 3 ++- primitive-types/impls/rlp/Cargo.toml | 4 ++-- primitive-types/impls/serde/CHANGELOG.md | 3 ++- primitive-types/impls/serde/Cargo.toml | 4 ++-- rlp-derive/CHANGELOG.md | 2 +- rlp-derive/Cargo.toml | 2 +- rlp/CHANGELOG.md | 2 +- rlp/Cargo.toml | 4 
++-- uint/CHANGELOG.md | 3 ++- uint/Cargo.toml | 2 +- 20 files changed, 48 insertions(+), 34 deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 801efb666..01e267c35 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.14.0] - 2024-09-11 +- Updated `impl-serde` to 0.5. [#859](https://github.com/paritytech/parity-common/pull/859) + ## [0.13.0] - 2022-09-20 - Updated `fixed-hash` to 0.8. [#680](https://github.com/paritytech/parity-common/pull/680) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index fdc8cf8dc..34b3b3e38 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.13.0" +version = "0.14.0" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -14,8 +14,8 @@ rust-version = "1.56.1" tiny-keccak = { version = "2.0", features = ["keccak"] } crunchy = { version = "0.2.2", default-features = false, features = ["limit_256"] } fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.4", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.5", default-features = false, optional = true } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 19a728f25..4247d6f7f 100644 --- a/ethereum-types/CHANGELOG.md +++ 
b/ethereum-types/CHANGELOG.md @@ -5,7 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -### Breaking + +## [0.15.0] - 2024-09-11 - Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) ## [0.14.1] - 2022-11-29 diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 67d6fe6d9..fa967e32b 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.14.1" +version = "0.15.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,12 +9,12 @@ edition = "2021" rust-version = "1.60.0" [dependencies] -ethbloom = { path = "../ethbloom", version = "0.13", optional = true, default-features = false } +ethbloom = { path = "../ethbloom", version = "0.14", optional = true, default-features = false } fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false, features = ["byteorder", "rustc-hex"] } -uint-crate = { path = "../uint", package = "uint", version = "0.9", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.12", features = ["byteorder", "rustc-hex"], default-features = false } -impl-serde = { path = "../primitive-types/impls/serde", version = "0.4.0", default-features = false, optional = true } -impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.3", default-features = false, optional = true } +uint-crate = { path = "../uint", package = "uint", version = "0.10", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.13", features = ["byteorder", "rustc-hex"], default-features = false } +impl-serde = { path = "../primitive-types/impls/serde", version = "0.5.0", default-features = false, optional = true } +impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, 
optional = true } impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } diff --git a/keccak-hash/CHANGELOG.md b/keccak-hash/CHANGELOG.md index 24d554a21..f34dc1074 100644 --- a/keccak-hash/CHANGELOG.md +++ b/keccak-hash/CHANGELOG.md @@ -6,7 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.12.0] - 2022-09-20 +## [0.11.0] - 2024-09-11 +- Updated `primitive-types` to 0.13. [#859](https://github.com/paritytech/parity-common/pull/859) + +## [0.10.0] - 2022-09-20 ### Breaking - Updated `parity-util-mem` to 0.12. [#680](https://github.com/paritytech/parity-common/pull/680) diff --git a/keccak-hash/Cargo.toml b/keccak-hash/Cargo.toml index 0ea24892d..210ea4020 100644 --- a/keccak-hash/Cargo.toml +++ b/keccak-hash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "keccak-hash" -version = "0.10.0" +version = "0.11.0" description = "`keccak-hash` is a set of utility functions to facilitate working with Keccak hashes (256/512 bits long)." authors = ["Parity Technologies "] repository = "https://github.com/paritytech/parity-common" @@ -11,7 +11,7 @@ rust-version = "1.56.1" [dependencies] tiny-keccak = { version = "2.0", features = ["keccak"] } -primitive-types = { path = "../primitive-types", version = "0.12", default-features = false } +primitive-types = { path = "../primitive-types", version = "0.13", default-features = false } [dev-dependencies] tempfile = "3.1.0" diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index 485138489..b0a1e7994 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -5,7 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -### Breaking + +## [0.13.0] - 2024-09-11 - Updated `uint` to 0.10. 
[#859](https://github.com/paritytech/parity-common/pull/859) ## [0.12.2] - 2023-10-10 diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 6aa89d2aa..1bf2f4d41 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.12.2" +version = "0.13.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,11 +11,11 @@ rust-version = "1.60.0" [dependencies] fixed-hash = { version = "0.8", path = "../fixed-hash", default-features = false } -uint = { version = "0.9.5", path = "../uint", default-features = false } -impl-serde = { version = "0.4.0", path = "impls/serde", default-features = false, optional = true } +uint = { version = "0.10.0", path = "../uint", default-features = false } +impl-serde = { version = "0.5.0", path = "impls/serde", default-features = false, optional = true } impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } -impl-num-traits = { version = "0.1.0", path = "impls/num-traits", default-features = false, optional = true } -impl-rlp = { version = "0.3", path = "impls/rlp", default-features = false, optional = true } +impl-num-traits = { version = "0.2.0", path = "impls/num-traits", default-features = false, optional = true } +impl-rlp = { version = "0.4", path = "impls/rlp", default-features = false, optional = true } scale-info-crate = { package = "scale-info", version = ">=0.9, <3", features = ["derive"], default-features = false, optional = true } schemars = { version = ">=0.8.12", default-features = true, optional = true } diff --git a/primitive-types/impls/num-traits/CHANGELOG.md b/primitive-types/impls/num-traits/CHANGELOG.md index d33482beb..1f811b9cc 100644 --- a/primitive-types/impls/num-traits/CHANGELOG.md +++ b/primitive-types/impls/num-traits/CHANGELOG.md @@ -6,6 +6,9 @@ The format is based on [Keep a Changelog]. 
## [Unreleased] +## [0.2.0] - 2024-09-11 +- Updated `uint` to 0.10. [#859](https://github.com/paritytech/parity-common/pull/859) + ## [0.1.2] - 2023-02-01 - Added `checked_*` trait impls. [#716](https://github.com/paritytech/parity-common/pull/716) - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/primitive-types/impls/num-traits/Cargo.toml b/primitive-types/impls/num-traits/Cargo.toml index 765ed92e6..64df11f7e 100644 --- a/primitive-types/impls/num-traits/Cargo.toml +++ b/primitive-types/impls/num-traits/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-num-traits" -version = "0.1.2" +version = "0.2.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -11,4 +11,4 @@ rust-version = "1.56.1" [dependencies] num-traits = { version = "0.2", default-features = false } integer-sqrt = "0.1" -uint = { version = "0.9.5", path = "../../../uint", default-features = false } +uint = { version = "0.10.0", path = "../../../uint", default-features = false } diff --git a/primitive-types/impls/rlp/CHANGELOG.md b/primitive-types/impls/rlp/CHANGELOG.md index a7a98544b..fb5aaed4c 100644 --- a/primitive-types/impls/rlp/CHANGELOG.md +++ b/primitive-types/impls/rlp/CHANGELOG.md @@ -4,7 +4,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.4.0] - 2024-09-11 +- Updated `rlp` to 0.6. [#859](https://github.com/paritytech/parity-common/pull/859) - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.3.0] - 2021-01-05 diff --git a/primitive-types/impls/rlp/Cargo.toml b/primitive-types/impls/rlp/Cargo.toml index e85d68e43..839a5842e 100644 --- a/primitive-types/impls/rlp/Cargo.toml +++ b/primitive-types/impls/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-rlp" -version = "0.3.0" +version = "0.4.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -9,7 +9,7 @@ edition = "2021" rust-version = "1.56.1" [dependencies] -rlp = { version = "0.5", path = "../../../rlp", default-features = false } +rlp = { version = "0.6", path = "../../../rlp", default-features = false } [features] default = ["std"] diff --git a/primitive-types/impls/serde/CHANGELOG.md b/primitive-types/impls/serde/CHANGELOG.md index 623185f63..700067d1a 100644 --- a/primitive-types/impls/serde/CHANGELOG.md +++ b/primitive-types/impls/serde/CHANGELOG.md @@ -5,7 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -### Breaking + +## [0.5.0] - 2024-09-11 - Updated `uint` to 0.10. 
[#859](https://github.com/paritytech/parity-common/pull/859) ## [0.4.0] - 2022-09-02 diff --git a/primitive-types/impls/serde/Cargo.toml b/primitive-types/impls/serde/Cargo.toml index 3a3ca2679..929a60126 100644 --- a/primitive-types/impls/serde/Cargo.toml +++ b/primitive-types/impls/serde/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-serde" -version = "0.4.0" +version = "0.5.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -19,7 +19,7 @@ serde = { version = "1.0.101", default-features = false, features = ["alloc"] } criterion = "0.5.1" serde_derive = "1.0.101" serde_json = "1.0.41" -uint = { version = "0.9.5", path = "../../../uint" } +uint = { version = "0.10.0", path = "../../../uint" } [[bench]] name = "impl_serde" diff --git a/rlp-derive/CHANGELOG.md b/rlp-derive/CHANGELOG.md index 85516d84b..baa0a22f5 100644 --- a/rlp-derive/CHANGELOG.md +++ b/rlp-derive/CHANGELOG.md @@ -4,7 +4,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.2.0] - 2024-09-11 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) ## [0.1.0] - 2020-02-13 diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index 5ca3af4ec..421015403 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -17,4 +17,4 @@ quote = "1.0.2" proc-macro2 = "1.0.8" [dev-dependencies] -rlp = { version = "0.5.0", path = "../rlp" } +rlp = { version = "0.6.0", path = "../rlp" } diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 0d5e343ac..c8232faca 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -4,7 +4,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.6.0] - 2024-09-11 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) ## [0.5.2] - 2022-10-21 diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 55d7eaf4b..55748e45e 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.5.2" +version = "0.6.0" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" @@ -16,7 +16,7 @@ rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } [dev-dependencies] criterion = "0.5.1" hex-literal = "0.4.1" -primitive-types = { path = "../primitive-types", version = "0.12", features = ["impl-rlp"] } +primitive-types = { path = "../primitive-types", version = "0.13", features = ["impl-rlp"] } [features] default = ["std"] diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 78474c709..110def6a4 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -5,7 +5,8 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ ## [Unreleased] -### Breaking + +## [0.10.0] - 2024-09-11 - Removed From<[u8; n]> conversions, renamed `to_big_endian` / `to_little_endian` to write_as_*, and made them return byte arrays. 
[#859](https://github.com/paritytech/parity-common/pull/859) ## [0.9.5] - 2022-11-29 diff --git a/uint/Cargo.toml b/uint/Cargo.toml index 44bef00d6..dbb84d18a 100644 --- a/uint/Cargo.toml +++ b/uint/Cargo.toml @@ -4,7 +4,7 @@ homepage = "http://parity.io" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" name = "uint" -version = "0.9.5" +version = "0.10.0" authors = ["Parity Technologies "] readme = "README.md" edition = "2021" From ab7a7625eb828daf6a4ce213f238e4376e6bb757 Mon Sep 17 00:00:00 2001 From: ordian Date: Wed, 11 Sep 2024 12:40:39 +0200 Subject: [PATCH 349/359] bump rlp-derive version (#861) * bump rlp-derive version * fix rlp version --- rlp-derive/Cargo.toml | 2 +- rlp/CHANGELOG.md | 3 ++- rlp/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/rlp-derive/Cargo.toml b/rlp-derive/Cargo.toml index 421015403..1a0967de1 100644 --- a/rlp-derive/Cargo.toml +++ b/rlp-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp-derive" -version = "0.1.0" +version = "0.2.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" description = "Derive macro for #[derive(RlpEncodable, RlpDecodable)]" diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index c8232faca..8c1c41077 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -4,8 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [0.6.0] - 2024-09-11 +## [0.6.1] - 2024-09-11 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) +- Updated `rlp` to 0.2.0. [#860](https://github.com/paritytech/parity-common/pull/860) ## [0.5.2] - 2022-10-21 - Add optional `derive` feature. 
[#613](https://github.com/paritytech/parity-common/pull/613) diff --git a/rlp/Cargo.toml b/rlp/Cargo.toml index 55748e45e..0f1583e5e 100644 --- a/rlp/Cargo.toml +++ b/rlp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rlp" -version = "0.6.0" +version = "0.6.1" description = "Recursive-length prefix encoding, decoding, and compression" repository = "https://github.com/paritytech/parity-common" license = "MIT OR Apache-2.0" @@ -11,7 +11,7 @@ rust-version = "1.56.1" [dependencies] bytes = { version = "1", default-features = false } rustc-hex = { version = "2.0.1", default-features = false } -rlp-derive = { version = "0.1", path = "../rlp-derive", optional = true } +rlp-derive = { version = "0.2", path = "../rlp-derive", optional = true } [dev-dependencies] criterion = "0.5.1" From 7c701c62fc19523cb975203ccf4c2ef9568c3a0c Mon Sep 17 00:00:00 2001 From: ordian Date: Thu, 12 Sep 2024 13:18:13 +0200 Subject: [PATCH 350/359] another missing bump (impl-codec) (#863) * bump impl-codec * fix rlp changelog * bump dep * ethereum-types too * ethbloom too --- ethbloom/CHANGELOG.md | 3 ++- ethbloom/Cargo.toml | 4 ++-- ethereum-types/CHANGELOG.md | 2 +- ethereum-types/Cargo.toml | 4 ++-- primitive-types/CHANGELOG.md | 2 +- primitive-types/Cargo.toml | 4 ++-- primitive-types/impls/codec/CHANGELOG.md | 4 ++++ primitive-types/impls/codec/Cargo.toml | 2 +- rlp/CHANGELOG.md | 2 +- 9 files changed, 16 insertions(+), 11 deletions(-) diff --git a/ethbloom/CHANGELOG.md b/ethbloom/CHANGELOG.md index 01e267c35..a9a5f415e 100644 --- a/ethbloom/CHANGELOG.md +++ b/ethbloom/CHANGELOG.md @@ -6,8 +6,9 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.14.0] - 2024-09-11 +## [0.14.1] - 2024-09-12 - Updated `impl-serde` to 0.5. [#859](https://github.com/paritytech/parity-common/pull/859) +- Updated `impl-codec` to 0.7. [#860](https://github.com/paritytech/parity-common/pull/860) ## [0.13.0] - 2022-09-20 - Updated `fixed-hash` to 0.8. 
[#680](https://github.com/paritytech/parity-common/pull/680) diff --git a/ethbloom/Cargo.toml b/ethbloom/Cargo.toml index 34b3b3e38..9f858c889 100644 --- a/ethbloom/Cargo.toml +++ b/ethbloom/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethbloom" -version = "0.14.0" +version = "0.14.1" authors = ["Parity Technologies "] description = "Ethereum bloom filter" license = "MIT OR Apache-2.0" @@ -16,7 +16,7 @@ crunchy = { version = "0.2.2", default-features = false, features = ["limit_256" fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.5", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, optional = true } -impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.7.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] diff --git a/ethereum-types/CHANGELOG.md b/ethereum-types/CHANGELOG.md index 4247d6f7f..33030f4bb 100644 --- a/ethereum-types/CHANGELOG.md +++ b/ethereum-types/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.15.0] - 2024-09-11 +## [0.15.1] - 2024-09-12 - Updated `uint` to 0.10. 
[#859](https://github.com/paritytech/parity-common/pull/859) ## [0.14.1] - 2022-11-29 diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index fa967e32b..81efabaaf 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethereum-types" -version = "0.15.0" +version = "0.15.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -15,7 +15,7 @@ uint-crate = { path = "../uint", package = "uint", version = "0.10", default-fea primitive-types = { path = "../primitive-types", version = "0.13", features = ["byteorder", "rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", version = "0.5.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, optional = true } -impl-codec = { version = "0.6.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.7.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } scale-info = { version = ">=1.0, <3", features = ["derive"], default-features = false, optional = true } [dev-dependencies] diff --git a/primitive-types/CHANGELOG.md b/primitive-types/CHANGELOG.md index b0a1e7994..7bcb454af 100644 --- a/primitive-types/CHANGELOG.md +++ b/primitive-types/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog]. ## [Unreleased] -## [0.13.0] - 2024-09-11 +## [0.13.1] - 2024-09-12 - Updated `uint` to 0.10. 
[#859](https://github.com/paritytech/parity-common/pull/859) ## [0.12.2] - 2023-10-10 diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 1bf2f4d41..34ddca0bb 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "primitive-types" -version = "0.13.0" +version = "0.13.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" @@ -13,7 +13,7 @@ rust-version = "1.60.0" fixed-hash = { version = "0.8", path = "../fixed-hash", default-features = false } uint = { version = "0.10.0", path = "../uint", default-features = false } impl-serde = { version = "0.5.0", path = "impls/serde", default-features = false, optional = true } -impl-codec = { version = "0.6.0", path = "impls/codec", default-features = false, optional = true } +impl-codec = { version = "0.7.0", path = "impls/codec", default-features = false, optional = true } impl-num-traits = { version = "0.2.0", path = "impls/num-traits", default-features = false, optional = true } impl-rlp = { version = "0.4", path = "impls/rlp", default-features = false, optional = true } scale-info-crate = { package = "scale-info", version = ">=0.9, <3", features = ["derive"], default-features = false, optional = true } diff --git a/primitive-types/impls/codec/CHANGELOG.md b/primitive-types/impls/codec/CHANGELOG.md index 713c28aa7..aac607ab4 100644 --- a/primitive-types/impls/codec/CHANGELOG.md +++ b/primitive-types/impls/codec/CHANGELOG.md @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog]. ## [Unreleased] +## [0.7.0] - 2024-09-12 +### Breaking +- Updated to `uint` 0.10. [#860](https://github.com/paritytech/parity-common/pull/860) + ## [0.6.0] - 2022-02-04 ### Breaking - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. 
[#601](https://github.com/paritytech/parity-common/pull/601) diff --git a/primitive-types/impls/codec/Cargo.toml b/primitive-types/impls/codec/Cargo.toml index 5f41774c1..d0bd6b602 100644 --- a/primitive-types/impls/codec/Cargo.toml +++ b/primitive-types/impls/codec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "impl-codec" -version = "0.6.0" +version = "0.7.0" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/rlp/CHANGELOG.md b/rlp/CHANGELOG.md index 8c1c41077..dd5fd2e4d 100644 --- a/rlp/CHANGELOG.md +++ b/rlp/CHANGELOG.md @@ -6,7 +6,7 @@ The format is based on [Keep a Changelog]. ## [0.6.1] - 2024-09-11 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) -- Updated `rlp` to 0.2.0. [#860](https://github.com/paritytech/parity-common/pull/860) +- Updated `rlp-derive` to 0.2.0. [#860](https://github.com/paritytech/parity-common/pull/860) ## [0.5.2] - 2022-10-21 - Add optional `derive` feature. 
[#613](https://github.com/paritytech/parity-common/pull/613) From 366a95cdf1a10b4870b1d780f11748c76fecc4a8 Mon Sep 17 00:00:00 2001 From: "polka.dom" Date: Tue, 8 Oct 2024 05:00:09 -0400 Subject: [PATCH 351/359] Serde Serialize & Deserialize for BoundedBTreeMap (#870) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implementation * Changelog * Move tests down * Remove clone bound * Set date to Monday Co-authored-by: ordian * Fix match that's always okay Co-authored-by: Bastian Köcher * Update Cargo.toml * Move serde tests behind feature flag * Add ToString * Remove underscore * Update bounded-collections/CHANGELOG.md * fmt --------- Co-authored-by: ordian Co-authored-by: Bastian Köcher Co-authored-by: ordian --- bounded-collections/CHANGELOG.md | 3 + bounded-collections/Cargo.toml | 2 +- bounded-collections/src/bounded_btree_map.rs | 126 ++++++++++++++++++- 3 files changed, 128 insertions(+), 3 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 101c91ec8..f6e016254 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [0.2.1] - 2024-10-08 +- Added `serde` support for `BoundedBTreeMap`. [#870](https://github.com/paritytech/parity-common/pull/870) + ## [0.2.0] - 2024-01-29 - Added `try_rotate_left` and `try_rotate_right` to `BoundedVec`. 
[#800](https://github.com/paritytech/parity-common/pull/800) diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index f9496eb8e..674b934f1 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.2.0" +version = "0.2.1" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index c3369c192..6a4419412 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ -21,6 +21,11 @@ use crate::{Get, TryCollect}; use alloc::collections::BTreeMap; use codec::{Compact, Decode, Encode, MaxEncodedLen}; use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; +#[cfg(feature = "serde")] +use serde::{ + de::{Error, MapAccess, Visitor}, + Deserialize, Deserializer, Serialize, +}; /// A bounded map based on a B-Tree. /// @@ -29,9 +34,70 @@ use core::{borrow::Borrow, marker::PhantomData, ops::Deref}; /// /// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the /// map. All internal operations ensure this bound is respected. 
+#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))] #[derive(Encode, scale_info::TypeInfo)] #[scale_info(skip_type_params(S))] -pub struct BoundedBTreeMap(BTreeMap, PhantomData); +pub struct BoundedBTreeMap( + BTreeMap, + #[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData, +); + +#[cfg(feature = "serde")] +impl<'de, K, V, S: Get> Deserialize<'de> for BoundedBTreeMap +where + K: Deserialize<'de> + Ord, + V: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // Create a visitor to visit each element in the map + struct BTreeMapVisitor(PhantomData<(K, V, S)>); + + impl<'de, K, V, S> Visitor<'de> for BTreeMapVisitor + where + K: Deserialize<'de> + Ord, + V: Deserialize<'de>, + S: Get, + { + type Value = BTreeMap; + + fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { + formatter.write_str("a map") + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let size = map.size_hint().unwrap_or(0); + let max = S::get() as usize; + if size > max { + Err(A::Error::custom("map exceeds the size of the bounds")) + } else { + let mut values = BTreeMap::new(); + + while let Some(key) = map.next_key()? { + if values.len() >= max { + return Err(A::Error::custom("map exceeds the size of the bounds")); + } + let value = map.next_value()?; + values.insert(key, value); + } + + Ok(values) + } + } + } + + let visitor: BTreeMapVisitor = BTreeMapVisitor(PhantomData); + deserializer.deserialize_map(visitor).map(|v| { + BoundedBTreeMap::::try_from(v) + .map_err(|_| Error::custom("failed to create a BoundedBTreeMap from the provided map")) + })? + } +} impl Decode for BoundedBTreeMap where @@ -44,7 +110,7 @@ where // the len is too big. 
let len: u32 = >::decode(input)?.into(); if len > S::get() { - return Err("BoundedBTreeMap exceeds its limit".into()) + return Err("BoundedBTreeMap exceeds its limit".into()); } input.descend_ref()?; let inner = Result::from_iter((0..len).map(|_| Decode::decode(input)))?; @@ -662,4 +728,60 @@ mod test { } let _foo = Foo::default(); } + + #[cfg(feature = "serde")] + mod serde { + use super::*; + use crate::alloc::string::ToString; + + #[test] + fn test_bounded_btreemap_serializer() { + let mut map = BoundedBTreeMap::>::new(); + map.try_insert(0, 100).unwrap(); + map.try_insert(1, 101).unwrap(); + map.try_insert(2, 102).unwrap(); + + let serialized = serde_json::to_string(&map).unwrap(); + assert_eq!(serialized, r#"{"0":100,"1":101,"2":102}"#); + } + + #[test] + fn test_bounded_btreemap_deserializer() { + let json_str = r#"{"0":100,"1":101,"2":102}"#; + let map: Result>, serde_json::Error> = serde_json::from_str(json_str); + assert!(map.is_ok()); + let map = map.unwrap(); + + assert_eq!(map.len(), 3); + assert_eq!(map.get(&0), Some(&100)); + assert_eq!(map.get(&1), Some(&101)); + assert_eq!(map.get(&2), Some(&102)); + } + + #[test] + fn test_bounded_btreemap_deserializer_bound() { + let json_str = r#"{"0":100,"1":101,"2":102}"#; + let map: Result>, serde_json::Error> = serde_json::from_str(json_str); + assert!(map.is_ok()); + let map = map.unwrap(); + + assert_eq!(map.len(), 3); + assert_eq!(map.get(&0), Some(&100)); + assert_eq!(map.get(&1), Some(&101)); + assert_eq!(map.get(&2), Some(&102)); + } + + #[test] + fn test_bounded_btreemap_deserializer_failed() { + let json_str = r#"{"0":100,"1":101,"2":102,"3":103,"4":104}"#; + let map: Result>, serde_json::Error> = serde_json::from_str(json_str); + + match map { + Err(e) => { + assert!(e.to_string().contains("map exceeds the size of the bounds")); + }, + _ => unreachable!("deserializer must raise error"), + } + } + } } From 85f813aa603280e8b2ec201366155533066db6a4 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Thu, 
10 Oct 2024 16:21:08 +0800 Subject: [PATCH 352/359] fixed-hash: remove `byteorder` feature (#872) * fixed-hash: remove byteorder features * fix fixed-hash part of CI * fix tests * update readme --- .github/workflows/ci.yml | 2 +- ethereum-types/Cargo.toml | 4 +- fixed-hash/Cargo.toml | 5 +- fixed-hash/README.md | 11 +-- fixed-hash/benches/cmp.rs | 4 +- fixed-hash/src/hash.rs | 183 +++++++++++++++---------------------- fixed-hash/src/lib.rs | 4 - fixed-hash/src/tests.rs | 4 +- primitive-types/Cargo.toml | 1 - 9 files changed, 85 insertions(+), 133 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b6e26ad82..f12e38087 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,7 +67,7 @@ jobs: args: -p uint --all-features - name: Test fixed-hash no_std - run: cargo test -p fixed-hash --no-default-features --features='byteorder,rustc-hex' + run: cargo test -p fixed-hash --no-default-features --features='rustc-hex' - name: Test fixed-hash all-features uses: actions-rs/cargo@v1 diff --git a/ethereum-types/Cargo.toml b/ethereum-types/Cargo.toml index 81efabaaf..110d338f1 100644 --- a/ethereum-types/Cargo.toml +++ b/ethereum-types/Cargo.toml @@ -10,9 +10,9 @@ rust-version = "1.60.0" [dependencies] ethbloom = { path = "../ethbloom", version = "0.14", optional = true, default-features = false } -fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false, features = ["byteorder", "rustc-hex"] } +fixed-hash = { path = "../fixed-hash", version = "0.8", default-features = false, features = ["rustc-hex"] } uint-crate = { path = "../uint", package = "uint", version = "0.10", default-features = false } -primitive-types = { path = "../primitive-types", version = "0.13", features = ["byteorder", "rustc-hex"], default-features = false } +primitive-types = { path = "../primitive-types", version = "0.13", features = ["rustc-hex"], default-features = false } impl-serde = { path = "../primitive-types/impls/serde", 
version = "0.5.0", default-features = false, optional = true } impl-rlp = { path = "../primitive-types/impls/rlp", version = "0.4", default-features = false, optional = true } impl-codec = { version = "0.7.0", path = "../primitive-types/impls/codec", default-features = false, optional = true } diff --git a/fixed-hash/Cargo.toml b/fixed-hash/Cargo.toml index b1023533b..161b1cef3 100644 --- a/fixed-hash/Cargo.toml +++ b/fixed-hash/Cargo.toml @@ -15,7 +15,6 @@ rust-version = "1.60" features = ["quickcheck", "api-dummy"] [dependencies] -byteorder = { version = "1.4.2", optional = true, default-features = false } quickcheck = { version = "1", optional = true } rand = { version = "0.8.0", optional = true, default-features = false } rustc-hex = { version = "2.0.1", optional = true, default-features = false } @@ -28,8 +27,8 @@ criterion = "0.5.1" rand = { version = "0.8.0", default-features = false, features = ["std_rng"] } [features] -default = ["std", "rand", "rustc-hex", "byteorder"] -std = ["rustc-hex/std", "rand?/std", "byteorder/std"] +default = ["std", "rand", "rustc-hex"] +std = ["rustc-hex/std", "rand?/std"] api-dummy = [] # Feature used by docs.rs to display documentation of hash types diff --git a/fixed-hash/README.md b/fixed-hash/README.md index 1974bea8f..7f38bc728 100644 --- a/fixed-hash/README.md +++ b/fixed-hash/README.md @@ -39,7 +39,7 @@ construct_fixed_hash!{ ## Features -By default this is an standard library depending crate. +By default this is an standard library depending crate. For a `#[no_std]` environment use it as follows: ``` @@ -52,17 +52,12 @@ fixed-hash = { version = "0.3", default-features = false } - Using this feature enables the following features - `rustc-hex/std` - `rand/std` - - `byteorder/std` - - Enabled by default. -- `libc`: Use `libc` for implementations of `PartialEq` and `Ord`. - Enabled by default. - `rand`: Provide API based on the `rand` crate. - Enabled by default. -- `byteorder`: Provide API based on the `byteorder` crate. 
- - Enabled by default. - `quickcheck`: Provide `quickcheck` implementation for hash types. - Disabled by default. -- `api-dummy`: Generate a dummy hash type for API documentation. - - Enabled by default at `docs.rs` - `arbitrary`: Allow for creation of a hash from random unstructured input. - Disabled by default. +- `api-dummy`: Generate a dummy hash type for API documentation. + - Enabled by default at `docs.rs` diff --git a/fixed-hash/benches/cmp.rs b/fixed-hash/benches/cmp.rs index fc5551e1c..38633f9f4 100644 --- a/fixed-hash/benches/cmp.rs +++ b/fixed-hash/benches/cmp.rs @@ -14,7 +14,7 @@ use fixed_hash::construct_fixed_hash; construct_fixed_hash! { pub struct H256(32); } -criterion_group!(cmp, eq_equal, eq_nonequal, compare,); +criterion_group!(cmp, eq_equal, eq_nonequal, compare); criterion_main!(cmp); fn eq_equal(c: &mut Criterion) { @@ -26,7 +26,7 @@ fn eq_equal(c: &mut Criterion) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x2D, 0x6D, 0x19, 0x40, 0x84, 0xC2, 0xDE, 0x36, 0xE0, 0xDA, 0xBF, 0xCE, 0x45, 0xD0, 0x46, 0xB3, 0x7D, 0x11, 0x06, ]), - H256([u8::max_value(); 32]), + H256([u8::MAX; 32]), ] { group.bench_with_input(BenchmarkId::from_parameter(input), &input, |b, x| { b.iter(|| black_box(x.eq(black_box(x)))) diff --git a/fixed-hash/src/hash.rs b/fixed-hash/src/hash.rs index a5c1ed41c..4cf63487e 100644 --- a/fixed-hash/src/hash.rs +++ b/fixed-hash/src/hash.rs @@ -262,8 +262,23 @@ macro_rules! 
construct_fixed_hash { } impl $crate::core_::cmp::PartialOrd for $name { + #[inline] fn partial_cmp(&self, other: &Self) -> Option<$crate::core_::cmp::Ordering> { - Some(self.cmp(other)) + self.as_bytes().partial_cmp(other.as_bytes()) + } + } + + impl $crate::core_::cmp::Ord for $name { + #[inline] + fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { + self.as_bytes().cmp(other.as_bytes()) + } + } + + impl $crate::core_::default::Default for $name { + #[inline] + fn default() -> Self { + Self::zero() } } @@ -295,53 +310,71 @@ macro_rules! construct_fixed_hash { } } - impl $crate::core_::default::Default for $name { - #[inline] - fn default() -> Self { - Self::zero() - } - } - - impl_ops_for_hash!($name, BitOr, bitor, BitOrAssign, bitor_assign, |, |=); - impl_ops_for_hash!($name, BitAnd, bitand, BitAndAssign, bitand_assign, &, &=); - impl_ops_for_hash!($name, BitXor, bitxor, BitXorAssign, bitxor_assign, ^, ^=); + impl_bit_ops_for_fixed_hash!($name, BitOr, bitor, BitOrAssign, bitor_assign, |, |=); + impl_bit_ops_for_fixed_hash!($name, BitAnd, bitand, BitAndAssign, bitand_assign, &, &=); + impl_bit_ops_for_fixed_hash!($name, BitXor, bitxor, BitXorAssign, bitxor_assign, ^, ^=); impl_byteorder_for_fixed_hash!($name); + impl_rand_for_fixed_hash!($name); - impl_cmp_for_fixed_hash!($name); impl_rustc_hex_for_fixed_hash!($name); impl_quickcheck_for_fixed_hash!($name); impl_arbitrary_for_fixed_hash!($name); } } -// Implementation for disabled byteorder crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `byteorder` crate feature in -// a user crate. -#[cfg(not(feature = "byteorder"))] #[macro_export] #[doc(hidden)] -macro_rules! impl_byteorder_for_fixed_hash { - ( $name:ident ) => {}; +macro_rules! 
impl_bit_ops_for_fixed_hash { + ( + $impl_for:ident, + $ops_trait_name:ident, + $ops_fn_name:ident, + $ops_assign_trait_name:ident, + $ops_assign_fn_name:ident, + $ops_tok:tt, + $ops_assign_tok:tt + ) => { + impl<'r> $crate::core_::ops::$ops_assign_trait_name<&'r $impl_for> for $impl_for { + fn $ops_assign_fn_name(&mut self, rhs: &'r $impl_for) { + for (lhs, rhs) in self.as_bytes_mut().iter_mut().zip(rhs.as_bytes()) { + *lhs $ops_assign_tok rhs; + } + } + } + + impl $crate::core_::ops::$ops_assign_trait_name<$impl_for> for $impl_for { + #[inline] + fn $ops_assign_fn_name(&mut self, rhs: $impl_for) { + *self $ops_assign_tok &rhs; + } + } + + impl<'l, 'r> $crate::core_::ops::$ops_trait_name<&'r $impl_for> for &'l $impl_for { + type Output = $impl_for; + + fn $ops_fn_name(self, rhs: &'r $impl_for) -> Self::Output { + let mut ret = self.clone(); + ret $ops_assign_tok rhs; + ret + } + } + + impl $crate::core_::ops::$ops_trait_name<$impl_for> for $impl_for { + type Output = $impl_for; + + #[inline] + fn $ops_fn_name(self, rhs: Self) -> Self::Output { + &self $ops_tok &rhs + } + } + }; } -// Implementation for enabled byteorder crate support. -// -// # Note -// -// Feature guarded macro definitions instead of feature guarded impl blocks -// to work around the problems of introducing `byteorder` crate feature in -// a user crate. -#[cfg(feature = "byteorder")] #[macro_export] #[doc(hidden)] macro_rules! impl_byteorder_for_fixed_hash { ( $name:ident ) => { - /// Utilities using the `byteorder` crate. impl $name { /// Returns the least significant `n` bytes as slice. /// @@ -354,14 +387,11 @@ macro_rules! impl_byteorder_for_fixed_hash { &self[(Self::len_bytes() - n)..] 
} - fn to_low_u64_with_byteorder(&self) -> u64 - where - B: $crate::byteorder::ByteOrder, - { + fn to_low_u64_with_fn(&self, from_bytes: fn([u8; 8]) -> u64) -> u64 { let mut buf = [0x0; 8]; let capped = $crate::core_::cmp::min(Self::len_bytes(), 8); buf[(8 - capped)..].copy_from_slice(self.least_significant_bytes(capped)); - B::read_u64(&buf) + from_bytes(buf) } /// Returns the lowest 8 bytes interpreted as big-endian. @@ -372,7 +402,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// are interpreted as being zero. #[inline] pub fn to_low_u64_be(&self) -> u64 { - self.to_low_u64_with_byteorder::<$crate::byteorder::BigEndian>() + self.to_low_u64_with_fn(u64::from_be_bytes) } /// Returns the lowest 8 bytes interpreted as little-endian. @@ -383,7 +413,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// are interpreted as being zero. #[inline] pub fn to_low_u64_le(&self) -> u64 { - self.to_low_u64_with_byteorder::<$crate::byteorder::LittleEndian>() + self.to_low_u64_with_fn(u64::from_le_bytes) } /// Returns the lowest 8 bytes interpreted as native-endian. @@ -394,15 +424,11 @@ macro_rules! impl_byteorder_for_fixed_hash { /// are interpreted as being zero. #[inline] pub fn to_low_u64_ne(&self) -> u64 { - self.to_low_u64_with_byteorder::<$crate::byteorder::NativeEndian>() + self.to_low_u64_with_fn(u64::from_ne_bytes) } - fn from_low_u64_with_byteorder(val: u64) -> Self - where - B: $crate::byteorder::ByteOrder, - { - let mut buf = [0x0; 8]; - B::write_u64(&mut buf, val); + fn from_low_u64_with_fn(val: u64, to_bytes: fn(u64) -> [u8; 8]) -> Self { + let buf = to_bytes(val); let capped = $crate::core_::cmp::min(Self::len_bytes(), 8); let mut bytes = [0x0; $crate::core_::mem::size_of::()]; bytes[(Self::len_bytes() - capped)..].copy_from_slice(&buf[..capped]); @@ -418,7 +444,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// if the hash type has less than 8 bytes. 
#[inline] pub fn from_low_u64_be(val: u64) -> Self { - Self::from_low_u64_with_byteorder::<$crate::byteorder::BigEndian>(val) + Self::from_low_u64_with_fn(val, u64::to_be_bytes) } /// Creates a new hash type from the given `u64` value. @@ -430,7 +456,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// if the hash type has less than 8 bytes. #[inline] pub fn from_low_u64_le(val: u64) -> Self { - Self::from_low_u64_with_byteorder::<$crate::byteorder::LittleEndian>(val) + Self::from_low_u64_with_fn(val, u64::to_le_bytes) } /// Creates a new hash type from the given `u64` value. @@ -442,7 +468,7 @@ macro_rules! impl_byteorder_for_fixed_hash { /// if the hash type has less than 8 bytes. #[inline] pub fn from_low_u64_ne(val: u64) -> Self { - Self::from_low_u64_with_byteorder::<$crate::byteorder::NativeEndian>(val) + Self::from_low_u64_with_fn(val, u64::to_ne_bytes) } } }; @@ -523,19 +549,6 @@ macro_rules! impl_rand_for_fixed_hash { }; } -#[macro_export] -#[doc(hidden)] -macro_rules! impl_cmp_for_fixed_hash { - ( $name:ident ) => { - impl $crate::core_::cmp::Ord for $name { - #[inline] - fn cmp(&self, other: &Self) -> $crate::core_::cmp::Ordering { - self.as_bytes().cmp(other.as_bytes()) - } - } - }; -} - // Implementation for disabled rustc-hex crate support. // // # Note @@ -662,54 +675,6 @@ macro_rules! impl_arbitrary_for_fixed_hash { }; } -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_ops_for_hash { - ( - $impl_for:ident, - $ops_trait_name:ident, - $ops_fn_name:ident, - $ops_assign_trait_name:ident, - $ops_assign_fn_name:ident, - $ops_tok:tt, - $ops_assign_tok:tt - ) => { - impl<'r> $crate::core_::ops::$ops_assign_trait_name<&'r $impl_for> for $impl_for { - fn $ops_assign_fn_name(&mut self, rhs: &'r $impl_for) { - for (lhs, rhs) in self.as_bytes_mut().iter_mut().zip(rhs.as_bytes()) { - *lhs $ops_assign_tok rhs; - } - } - } - - impl $crate::core_::ops::$ops_assign_trait_name<$impl_for> for $impl_for { - #[inline] - fn $ops_assign_fn_name(&mut self, rhs: $impl_for) { - *self $ops_assign_tok &rhs; - } - } - - impl<'l, 'r> $crate::core_::ops::$ops_trait_name<&'r $impl_for> for &'l $impl_for { - type Output = $impl_for; - - fn $ops_fn_name(self, rhs: &'r $impl_for) -> Self::Output { - let mut ret = self.clone(); - ret $ops_assign_tok rhs; - ret - } - } - - impl $crate::core_::ops::$ops_trait_name<$impl_for> for $impl_for { - type Output = $impl_for; - - #[inline] - fn $ops_fn_name(self, rhs: Self) -> Self::Output { - &self $ops_tok &rhs - } - } - }; -} - /// Implements lossy conversions between the given types. 
/// /// # Note diff --git a/fixed-hash/src/lib.rs b/fixed-hash/src/lib.rs index 228f551e0..5f365e997 100644 --- a/fixed-hash/src/lib.rs +++ b/fixed-hash/src/lib.rs @@ -30,10 +30,6 @@ pub use static_assertions; #[doc(hidden)] pub use static_assertions::const_assert; -#[cfg(feature = "byteorder")] -#[doc(hidden)] -pub use byteorder; - #[cfg(feature = "rustc-hex")] #[doc(hidden)] pub use rustc_hex; diff --git a/fixed-hash/src/tests.rs b/fixed-hash/src/tests.rs index 3ba8a6508..a0975462f 100644 --- a/fixed-hash/src/tests.rs +++ b/fixed-hash/src/tests.rs @@ -155,7 +155,6 @@ mod is_zero { } } -#[cfg(feature = "byteorder")] mod to_low_u64 { use super::*; @@ -195,7 +194,6 @@ mod to_low_u64 { } } -#[cfg(feature = "byteorder")] mod from_low_u64 { use super::*; @@ -328,8 +326,8 @@ fn from_h256_to_h160_lossy() { assert_eq!(h160, expected); } -#[cfg(all(feature = "std", feature = "byteorder"))] #[test] +#[cfg(feature = "std")] fn display_and_debug() { fn test_for(x: u64, hex: &'static str, display: &'static str) { let hash = H64::from_low_u64_be(x); diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 34ddca0bb..7e11ae8df 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -28,7 +28,6 @@ jsonschema = { version = "0.17", default-features = false } default = ["std", "rand"] std = ["uint/std", "fixed-hash/std", "impl-codec?/std"] rand = ["fixed-hash/rand"] -byteorder = ["fixed-hash/byteorder"] rustc-hex = ["fixed-hash/rustc-hex"] serde = ["std", "impl-serde", "impl-serde/std"] json-schema = ["dep:schemars"] From 6a5bff137154cb3db8720d37516226bc494d43b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:00:02 +0200 Subject: [PATCH 353/359] build(deps): bump Swatinem/rust-cache from 2.7.3 to 2.7.5 (#876) Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.3 to 2.7.5. 
- [Release notes](https://github.com/swatinem/rust-cache/releases) - [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md) - [Commits](https://github.com/swatinem/rust-cache/compare/23bce251a8cd2ffc3c1075eaa2367cf899916d84...82a92a6e8fbeee089604da2575dc567ae9ddeaab) --- updated-dependencies: - dependency-name: Swatinem/rust-cache dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f12e38087..a3be07c05 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - uses: actions-rs/cargo@v1 with: @@ -43,7 +43,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - run: rustup target add wasm32-unknown-unknown - run: rustup target add mips64-unknown-linux-muslabi64 @@ -137,7 +137,7 @@ jobs: override: true - name: Rust Cache - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 + uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - uses: actions-rs/cargo@v1 with: From 6afa7833f08d9640144e6f935f2bc86d1b8c96c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:00:19 +0200 Subject: [PATCH 354/359] build(deps): update jsonschema requirement from 0.17 to 0.23 (#875) Updates the requirements on [jsonschema](https://github.com/Stranger6667/jsonschema-rs) to permit the 
latest version. - [Release notes](https://github.com/Stranger6667/jsonschema-rs/releases) - [Changelog](https://github.com/Stranger6667/jsonschema-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/Stranger6667/jsonschema-rs/compare/rust-v0.17.0...rust-v0.23.0) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- primitive-types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitive-types/Cargo.toml b/primitive-types/Cargo.toml index 7e11ae8df..20292d014 100644 --- a/primitive-types/Cargo.toml +++ b/primitive-types/Cargo.toml @@ -22,7 +22,7 @@ schemars = { version = ">=0.8.12", default-features = true, optional = true } [dev-dependencies] num-traits = "0.2" serde_json = { version = "1.0", default-features = false } -jsonschema = { version = "0.17", default-features = false } +jsonschema = { version = "0.23", default-features = false } [features] default = ["std", "rand"] From 315299d1f0712a38a0088c508c88b1cf9100da04 Mon Sep 17 00:00:00 2001 From: ordian Date: Mon, 4 Nov 2024 12:42:47 +0100 Subject: [PATCH 355/359] try fixing macos ci (#882) * bump rust msrv to 1.75 * bump CI rust to 1.75 * remove big-endian tests --- .github/workflows/ci.yml | 12 +----------- bounded-collections/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a3be07c05..d499b2515 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,14 +39,13 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.74.0 + toolchain: 1.75.0 override: true - name: Rust Cache uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5 - run: rustup target add wasm32-unknown-unknown - - run: rustup target add mips64-unknown-linux-muslabi64 - name: Test no-default-features 
uses: actions-rs/cargo@v1 @@ -117,14 +116,6 @@ jobs: command: test args: -p bounded-collections --all-features - - name: Test uint on bigendian - if: runner.os == 'Linux' - uses: actions-rs/cargo@v1 - with: - use-cross: true - command: test - args: -p uint --target=mips64-unknown-linux-muslabi64 - test_windows: name: Test Windows runs-on: windows-latest @@ -144,7 +135,6 @@ jobs: command: test args: --workspace --exclude kvdb-rocksdb - fmt: name: Rustfmt runs-on: ubuntu-latest diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index 674b934f1..ea1bdddcb 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" description = "Bounded types and their supporting traits" edition = "2021" -rust-version = "1.60.0" +rust-version = "1.75.0" [dependencies] serde = { version = "1.0.101", default-features = false, optional = true, features=["alloc", "derive"] } From 31ae2341bab13604013419a7c3627058e453f162 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Mon, 4 Nov 2024 21:55:34 +0900 Subject: [PATCH 356/359] bounded-collections: Add Const(Int|Uint) for generic const getter support (#878) * bounded-collections: Add Const(Int|Uint) for generic const getter support * bounded-collections: Update changelog * bounded-collections: Clean up helper types for ConstUint * bounded-collections: Fix broken compile-time check * bounded-collections: Add compile_fail doc test for overflown ConstUint * bounded-collections: Address review comments --------- Co-authored-by: ordian --- bounded-collections/CHANGELOG.md | 3 + bounded-collections/src/const_int.rs | 153 +++++++++++++++++++++++++++ bounded-collections/src/lib.rs | 2 + 3 files changed, 158 insertions(+) create mode 100644 bounded-collections/src/const_int.rs diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index f6e016254..59fc603c8 100644 --- 
a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,6 +4,9 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ +## [Unreleased] +- Added `ConstInt` and `ConstUint` types. [#878](https://github.com/paritytech/parity-common/pull/878) + ## [0.2.1] - 2024-10-08 - Added `serde` support for `BoundedBTreeMap`. [#870](https://github.com/paritytech/parity-common/pull/870) diff --git a/bounded-collections/src/const_int.rs b/bounded-collections/src/const_int.rs new file mode 100644 index 000000000..df1f4a16f --- /dev/null +++ b/bounded-collections/src/const_int.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use crate::{Get, TypedGet}; +use core::marker::PhantomData; + +// Numbers which have constant upper and lower bounds. +trait ConstBounded { + const MIN: T; + const MAX: T; +} + +macro_rules! impl_const_bounded { + ($bound:ty, $t:ty) => { + impl ConstBounded<$bound> for $t { + const MIN: $bound = <$t>::MIN as $bound; + const MAX: $bound = <$t>::MAX as $bound; + } + }; +} + +impl_const_bounded!(u128, u8); +impl_const_bounded!(u128, u16); +impl_const_bounded!(u128, u32); +impl_const_bounded!(u128, u64); +impl_const_bounded!(u128, u128); +impl_const_bounded!(u128, usize); + +impl_const_bounded!(i128, i8); +impl_const_bounded!(i128, i16); +impl_const_bounded!(i128, i32); +impl_const_bounded!(i128, i64); +impl_const_bounded!(i128, i128); + +// Check whether a unsigned integer is within the bounds of a type. +struct CheckOverflowU128, const N: u128>(PhantomData); + +impl, const N: u128> CheckOverflowU128 { + const ASSERTION: () = assert!(N >= T::MIN && N <= T::MAX); +} + +// Check whether an integer is within the bounds of a type. 
+struct CheckOverflowI128, const N: i128>(PhantomData); + +impl, const N: i128> CheckOverflowI128 { + const ASSERTION: () = assert!(N >= T::MIN && N <= T::MAX); +} + +/// Const getter for unsigned integers. +/// +/// # Compile-time checks +/// +/// ```compile_fail +/// # use bounded_collections::{ConstUint, Get}; +/// let _ = as Get>::get(); +/// ``` +#[derive(Default, Clone)] +pub struct ConstUint; + +impl core::fmt::Debug for ConstUint { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str(&alloc::format!("ConstUint<{}>", N)) + } +} + +impl TypedGet for ConstUint { + type Type = u128; + fn get() -> u128 { + N + } +} + +/// Const getter for signed integers. +#[derive(Default, Clone)] +pub struct ConstInt; + +impl core::fmt::Debug for ConstInt { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + fmt.write_str(&alloc::format!("ConstInt<{}>", N)) + } +} + +impl TypedGet for ConstInt { + type Type = i128; + fn get() -> i128 { + N + } +} + +macro_rules! 
impl_const_int { + ($t:ident, $check:ident, $bound:ty, $target:ty) => { + impl Get<$target> for $t { + fn get() -> $target { + let _ = <$check<$target, N>>::ASSERTION; + N as $target + } + } + impl Get> for $t { + fn get() -> Option<$target> { + let _ = <$check<$target, N>>::ASSERTION; + Some(N as $target) + } + } + }; +} + +impl_const_int!(ConstUint, CheckOverflowU128, u128, u8); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u16); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u32); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u64); +impl_const_int!(ConstUint, CheckOverflowU128, u128, u128); +impl_const_int!(ConstUint, CheckOverflowU128, u128, usize); + +impl_const_int!(ConstInt, CheckOverflowI128, i128, i8); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i16); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i32); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i64); +impl_const_int!(ConstInt, CheckOverflowI128, i128, i128); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn const_uint_works() { + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>>::get(), Some(42)); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as Get>::get(), 42); + assert_eq!( as TypedGet>::get(), 42); + // compile-time error + // assert_eq!( as Get>::get() as u128, 256); + } + + #[test] + fn const_int_works() { + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>>::get(), Some(-42)); + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>::get(), -42); + assert_eq!( as Get>::get(), -42); + assert_eq!( as TypedGet>::get(), -42); + } +} diff --git a/bounded-collections/src/lib.rs b/bounded-collections/src/lib.rs index ee706da5f..c7d5d1f7d 100644 --- a/bounded-collections/src/lib.rs +++ b/bounded-collections/src/lib.rs @@ -16,6 +16,7 @@ pub extern crate alloc; pub mod bounded_btree_map; pub mod 
bounded_btree_set; pub mod bounded_vec; +pub mod const_int; pub mod weak_bounded_vec; mod test; @@ -23,6 +24,7 @@ mod test; pub use bounded_btree_map::BoundedBTreeMap; pub use bounded_btree_set::BoundedBTreeSet; pub use bounded_vec::{BoundedSlice, BoundedVec}; +pub use const_int::{ConstInt, ConstUint}; pub use weak_bounded_vec::WeakBoundedVec; /// A trait for querying a single value from a type defined in the trait. From e3787dc768b08e10809834c65419ad3c255b5cac Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Fri, 8 Nov 2024 20:36:26 +0900 Subject: [PATCH 357/359] bounded-collections: Bump version to 0.2.2 (#883) --- bounded-collections/CHANGELOG.md | 2 +- bounded-collections/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bounded-collections/CHANGELOG.md b/bounded-collections/CHANGELOG.md index 59fc603c8..101024fe0 100644 --- a/bounded-collections/CHANGELOG.md +++ b/bounded-collections/CHANGELOG.md @@ -4,7 +4,7 @@ The format is based on [Keep a Changelog]. [Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ -## [Unreleased] +## [0.2.2] - 2024-11-08 - Added `ConstInt` and `ConstUint` types. 
[#878](https://github.com/paritytech/parity-common/pull/878) ## [0.2.1] - 2024-10-08 diff --git a/bounded-collections/Cargo.toml b/bounded-collections/Cargo.toml index ea1bdddcb..3f72b5e99 100644 --- a/bounded-collections/Cargo.toml +++ b/bounded-collections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bounded-collections" -version = "0.2.1" +version = "0.2.2" authors = ["Parity Technologies "] license = "MIT OR Apache-2.0" homepage = "https://github.com/paritytech/parity-common" From 80826913b1b941574819073b68da50c2fe51ab7e Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Wed, 4 Dec 2024 11:47:10 +0200 Subject: [PATCH 358/359] Update CHANGELOG.md (#885) --- uint/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uint/CHANGELOG.md b/uint/CHANGELOG.md index 110def6a4..99e30f197 100644 --- a/uint/CHANGELOG.md +++ b/uint/CHANGELOG.md @@ -24,7 +24,7 @@ The format is based on [Keep a Changelog]. ## [0.9.2] - 2022-01-28 - Migrated to 2021 edition, enforcing MSRV of `1.56.1`. [#601](https://github.com/paritytech/parity-common/pull/601) -- Display formatting support. [#603](ttps://github.com/paritytech/parity-common/pull/603) +- Display formatting support. [#603](https://github.com/paritytech/parity-common/pull/603) ## [0.9.1] - 2021-06-30 - Added `integer_sqrt` method. 
[#554](https://github.com/paritytech/parity-common/pull/554) From d54ac847cc5447941390b5e9e5e3baef436e1451 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Mon, 9 Dec 2024 05:10:32 +0000 Subject: [PATCH 359/359] Add `is_full` API to all bounded collections (#887) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add `is_full` api to all bounded collections * Update bounded-collections/src/weak_bounded_vec.rs Co-authored-by: Bastian Köcher --------- Co-authored-by: Bastian Köcher --- bounded-collections/src/bounded_btree_map.rs | 17 +++++++++++++++++ bounded-collections/src/bounded_btree_set.rs | 17 +++++++++++++++++ bounded-collections/src/bounded_vec.rs | 14 +++++++++++++- bounded-collections/src/weak_bounded_vec.rs | 17 +++++++++++++++++ 4 files changed, 64 insertions(+), 1 deletion(-) diff --git a/bounded-collections/src/bounded_btree_map.rs b/bounded-collections/src/bounded_btree_map.rs index 6a4419412..574f074fa 100644 --- a/bounded-collections/src/bounded_btree_map.rs +++ b/bounded-collections/src/bounded_btree_map.rs @@ -271,6 +271,11 @@ where .collect::, _>>()?, )) } + + /// Returns true if this map is full. 
+ pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } } impl Default for BoundedBTreeMap @@ -784,4 +789,16 @@ mod test { } } } + + #[test] + fn is_full_works() { + let mut bounded = boundedmap_from_keys::>(&[1, 2, 3]); + assert!(!bounded.is_full()); + bounded.try_insert(0, ()).unwrap(); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(9, ()).is_err()); + assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3])); + } } diff --git a/bounded-collections/src/bounded_btree_set.rs b/bounded-collections/src/bounded_btree_set.rs index e651c862e..0942f34e8 100644 --- a/bounded-collections/src/bounded_btree_set.rs +++ b/bounded-collections/src/bounded_btree_set.rs @@ -207,6 +207,11 @@ where { self.0.take(value) } + + /// Returns true if this set is full. + pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } } impl Default for BoundedBTreeSet @@ -587,6 +592,18 @@ mod test { let _foo = Foo::default(); } + #[test] + fn is_full_works() { + let mut bounded = boundedset_from_keys::>(&[1, 2, 3]); + assert!(!bounded.is_full()); + bounded.try_insert(0).unwrap(); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(9).is_err()); + assert_eq!(*bounded, set_from_keys(&[1, 0, 2, 3])); + } + #[cfg(feature = "serde")] mod serde { use super::*; diff --git a/bounded-collections/src/bounded_vec.rs b/bounded-collections/src/bounded_vec.rs index 4d56971eb..1c3a5b34f 100644 --- a/bounded-collections/src/bounded_vec.rs +++ b/bounded-collections/src/bounded_vec.rs @@ -437,7 +437,7 @@ impl> BoundedVec { S::get() as usize } - /// Returns true of this collection is full. + /// Returns true if this collection is full. 
pub fn is_full(&self) -> bool { self.len() >= Self::bound() } @@ -1368,4 +1368,16 @@ mod test { } let _foo = Foo { bar: 42, slice: BoundedSlice::truncate_from(&[0, 1][..]), map: BoundedVec::default() }; } + + #[test] + fn is_full_works() { + let mut bounded: BoundedVec> = bounded_vec![1, 2, 3]; + assert!(!bounded.is_full()); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } } diff --git a/bounded-collections/src/weak_bounded_vec.rs b/bounded-collections/src/weak_bounded_vec.rs index de5fa7128..b6f0846e9 100644 --- a/bounded-collections/src/weak_bounded_vec.rs +++ b/bounded-collections/src/weak_bounded_vec.rs @@ -225,6 +225,11 @@ impl> WeakBoundedVec { Err(()) } } + + /// Returns true if this collection is full. + pub fn is_full(&self) -> bool { + self.len() >= Self::bound() + } } impl Default for WeakBoundedVec { @@ -517,4 +522,16 @@ mod test { let w = WeakBoundedVec::>::decode(&mut &v.encode()[..]).unwrap(); assert_eq!(v, *w); } + + #[test] + fn is_full_works() { + let mut bounded: WeakBoundedVec> = vec![1, 2, 3].try_into().unwrap(); + assert!(!bounded.is_full()); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.is_full()); + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } }