diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 2d4a1b00..8fc04b22 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -2,7 +2,7 @@ name: Rust on: push: - branches: [master, dev] + branches: [master, dev, dilation-1] pull_request: branches: [master, dev] @@ -56,7 +56,7 @@ jobs: - ubuntu-latest - windows-latest rust: - - 1.61.0 # MSRV (also change in Cargo.toml) + - 1.66.0 # MSRV (also change in Cargo.toml) - stable - nightly steps: @@ -66,6 +66,7 @@ jobs: profile: minimal toolchain: ${{ matrix.rust }} override: true + target: wasm32-unknown-unknown - name: Cache ~/.cargo uses: actions/cache@v1 with: @@ -96,11 +97,21 @@ jobs: with: command: build args: -p magic-wormhole --no-default-features --features=forwarding + - name: build library (features=dilation) + uses: actions-rs/cargo@v1 + with: + command: build + args: -p magic-wormhole --no-default-features --features=dilation - name: build CLI uses: actions-rs/cargo@v1 with: command: build args: --all-targets + - name: build WASM + uses: actions-rs/cargo@v1 + with: + command: build + args: --target wasm32-unknown-unknown --no-default-features --package magic-wormhole --features transit --features transfer - name: test uses: actions-rs/cargo@v1 with: @@ -154,6 +165,10 @@ jobs: with: toolchain: nightly override: true + - uses: actions-rs/install@v0.1 + with: + crate: cargo-tarpaulin + version: latest - name: Cache ~/.cargo uses: actions/cache@v1 with: @@ -164,9 +179,11 @@ jobs: with: path: target key: ${{ runner.os }}-coverage-cargo-build-target - - uses: actions-rs/tarpaulin@v0.1 + - name: Run tarpaulin + uses: actions-rs/cargo@v1 with: - args: --all + command: tarpaulin + args: --workspace --out Xml - name: upload coverage uses: codecov/codecov-action@v1 with: diff --git a/.gitignore b/.gitignore index 2bb17fd2..1417d84d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ **/*.rs.bk .idea/ +.vscode/ diff --git a/Cargo.lock b/Cargo.lock index 2a979b15..368ea0f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,18 +55,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-attributes" @@ -75,7 +75,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ "async-lock", "async-task", @@ -120,39 +120,38 @@ dependencies = [ [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg", + "cfg-if", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", - "windows-sys", ] [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", - "futures-lite", ] [[package]] name = "async-process" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" +checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ "async-io", "async-lock", @@ -161,9 +160,9 @@ dependencies = [ "cfg-if", "event-listener", "futures-lite", - "libc", + "rustix", "signal-hook", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -204,45 +203,46 @@ dependencies = [ "filetime", "libc", "pin-project", - "redox_syscall", + "redox_syscall 0.2.16", "xattr", ] [[package]] name = "async-task" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-tls" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" +checksum = "cfeefd0ca297cbbb3bd34fd6b228401c2a5177038257afd751bc29f0a2da4795" dependencies = [ "futures-core", "futures-io", "rustls", + "rustls-pemfile", "webpki", "webpki-roots", ] [[package]] name = "async-trait" -version = "0.1.59" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.18", ] [[package]] name = "async-tungstenite" -version = "0.17.2" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b71b31561643aa8e7df3effe284fa83ab1a840e52294c5f4bd7bfd8b2becbb" +checksum = "ce01ac37fdc85f10a43c43bc582cbd566720357011578a935761075f898baf58" dependencies = [ "async-std", "async-tls", @@ -253,11 +253,22 @@ dependencies = [ "tungstenite", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version", +] + [[package]] name = "atomic-waker" -version = "1.0.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "atty" @@ -293,15 +304,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.1" +version = "0.21.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "bitflags" @@ -311,11 +316,11 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -326,18 +331,18 @@ checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "blocking" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", "async-lock", @@ -345,13 +350,14 @@ dependencies = [ "atomic-waker", "fastrand", "futures-lite", + "log", ] [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytecodec" @@ -377,21 +383,18 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" - -[[package]] -name = "cache-padded" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cc" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +dependencies = [ + "jobserver", +] [[package]] name = "cfg-if" @@ -441,9 +444,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.23" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", "bitflags", @@ -451,9 +454,9 @@ dependencies = [ "clap_lex", "indexmap", "once_cell", - "strsim 0.10.0", + "strsim", "termcolor", - "terminal_size 0.2.3", + 
"terminal_size", "textwrap", ] @@ -468,15 +471,15 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.18" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -504,9 +507,9 @@ dependencies = [ [[package]] name = "clipboard-win" -version = "4.4.2" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4ab1b92798304eedc095b53942963240037c0516452cb11aeba709d420b2219" +checksum = "7191c27c2357d9b7ef96baac1773290d4ca63b24205b82a3fd8a0637afcf0362" dependencies = [ "error-code", "str-buf", @@ -542,56 +545,55 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] [[package]] name = "console" -version = "0.15.2" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c050367d967ced717c04b65d8c619d863ef9292ce0c5760028655a2fb298718c" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", - "terminal_size 0.1.17", "unicode-width", - "winapi", + "windows-sys 0.45.0", ] [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "03e69e28e9f7f77debdedbaafa2866e1de9ba56df55a8bd7cfc724c25a09987c" dependencies = [ "libc", ] [[package]] name = "crc" -version = "2.1.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "1.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -606,7 +608,7 @@ dependencies = [ "crossterm_winapi", "libc", "mio", - "parking_lot", + "parking_lot 0.12.1", "signal-hook", "signal-hook-mio", "winapi", @@ -614,9 +616,9 @@ dependencies = [ [[package]] name = "crossterm_winapi" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ae1b35a484aa10e07fe0638d02301c5ad24de82d310ccbd2f3693da5f09bf1c" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ "winapi", ] @@ -631,16 +633,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "ctr" version = "0.8.0" @@ -652,12 +644,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.4" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1631ca6e3c59112501a9d87fd86f21591ff77acd31331e8a73f8d80a65bbdd71" +checksum = "2a011bbe2c35ce9c1f143b7af6f94f29a167beb4cd1d29e6740ce836f723120e" dependencies = [ - "nix 0.26.1", - "windows-sys", + "nix 0.26.2", + "windows-sys 0.48.0", ] [[package]] @@ -674,39 +666,10 @@ dependencies = [ ] [[package]] -name = "darling" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.10.2" +name = "data-encoding" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.9.3", - "syn", -] - -[[package]] -name = "darling_macro" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" -dependencies = [ - "darling_core", - "quote", - "syn", -] +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "derive-new" @@ -716,7 +679,7 @@ checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -727,20 +690,27 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "dialoguer" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92e7e37ecef6857fdc0c0c5d42fd5b0938e46590c2183cc92dd310a6d078eb1" +checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87" dependencies = [ "console", + "shell-words", "tempfile", "zeroize", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.9.0" @@ -752,21 +722,33 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", "subtle", ] +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "downcast-rs" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +[[package]] +name = "either" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" + [[package]] name = "encode_unicode" version = "0.3.6" @@ -788,13 +770,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -835,23 +817,23 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "filetime" -version = "0.2.19" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9" +checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if", "libc", - "redox_syscall", - "windows-sys", + "redox_syscall 0.2.16", + "windows-sys 0.48.0", ] [[package]] @@ -860,6 +842,15 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -868,18 +859,24 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "futures" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -892,9 +889,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -902,15 +899,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.25" 
+version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -919,15 +916,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -940,32 +937,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.18", ] [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -981,22 +978,21 @@ dependencies = [ [[package]] name = "futures_ringbuf" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b905098b5519bd63b2a1f9f4615198b0e38a473ce201ffdbd4dea6eb63087ddc" +checksum = "6628abb6eb1fc74beaeb20cd0670c43d158b0150f7689b38c3eaf663f99bdec7" dependencies = [ "futures", "log", - "log-derive", "ringbuf", "rustc_version", ] [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1025,13 +1021,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -1046,15 +1044,15 @@ 
dependencies = [ [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "gloo-timers" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c4a8d6391675c6b2ee1a6c8d06e8e2d03605c44cec1270675985a4c2a5500b" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", "futures-core", @@ -1070,9 +1068,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -1085,12 +1083,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.2.6" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" [[package]] name = "hex" @@ -1116,7 +1111,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -1125,14 +1120,14 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1333fad8d94b82cab989da428b0b36a3435db3870d85e971a1d6dc0a8576722" dependencies = [ - "sha1", + "sha1 0.2.0", ] [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -1151,17 +1146,11 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1169,12 +1158,12 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "2cfc4a06638d2fd0dda83b01126fefd38ef9f04f54d2fc717a938df68b83a68d" dependencies = [ "libc", - "winapi", + "windows-sys 0.45.0", ] [[package]] @@ -1185,9 +1174,9 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.9.2" 
+version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -1195,11 +1184,12 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.2" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4295cbb7573c16d310e99e713cf9e75101eb190ab31fccd35f2d2691b4352b19" +checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057" dependencies = [ "console", + "instant", "number_prefix", "portable-atomic", "unicode-width", @@ -1212,41 +1202,63 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] name = "io-lifetimes" -version = "1.0.3" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "is-terminal" -version = "0.4.1" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927609f78c2913a6f6ac3c27a4fe87f43e2a35367c0c4b0f8265e8f49a104330" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi 0.3.1", "io-lifetimes", "rustix", - "windows-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", ] [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" + +[[package]] +name = "jobserver" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1268,21 +1280,21 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.138" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = 
"lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -1290,26 +1302,13 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" dependencies = [ - "cfg-if", "value-bag", ] -[[package]] -name = "log-derive" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a42526bb432bcd1b43571d5f163984effa25409a29f1a3242a54d0577d55bcf" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "magic-wormhole" version = "0.6.0" @@ -1319,7 +1318,7 @@ dependencies = [ "async-tar", "async-trait", "async-tungstenite", - "base64 0.20.0", + "base64", "bytecodec", "derive_more", "dialoguer", @@ -1327,11 +1326,15 @@ dependencies = [ "eyre", "futures", "futures_ringbuf", + "getrandom 0.2.10", "hex", "hkdf", "if-addrs", + "instant", "libc", "log", + "mockall", + "mockall_double", "noise-protocol", "noise-rust-crypto", "percent-encoding", @@ -1342,13 +1345,17 @@ dependencies = [ "serde_json", "sha-1", "sha2", - "socket2", + "socket2 0.5.3", "spake2", "stun_codec", + "tar", "thiserror", "time", "url", + "wasm-timer", + "ws_stream_wasm", "xsalsa20poly1305", + "zstd", ] [[package]] @@ -1398,14 +1405,53 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.48.0", +] + +[[package]] +name = "mockall" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "mockall_double" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae71c7bb287375187c775cf82e2dcf1bef3388aaf58f0789a77f9c7ab28466f6" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -1422,9 +1468,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46a58d1d356c6597d08cde02c2f09d785b09e28711837b1ed667dc652c08a694" +checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags", "cfg-if", @@ -1450,7 +1496,7 @@ dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "getrandom 0.2.8", + "getrandom 0.2.10", "noise-protocol", "sha2", "x25519-dalek", @@ 
-1459,14 +1505,20 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num-traits" version = "0.2.15" @@ -1513,18 +1565,18 @@ dependencies = [ [[package]] name = "object" -version = "0.30.0" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.16.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -1534,19 +1586,19 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "os_pipe" -version = "1.1.2" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6a252f1f8c11e84b3ab59d7a488e48e4478a93937e027076638c49536204639" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" [[package]] name = "owo-colors" @@ -1556,9 +1608,20 @@ checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] [[package]] name = "parking_lot" @@ -1567,62 +1630,86 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.8", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.5" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys", + "windows-targets 0.48.0", ] [[package]] name = "paste" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1c2c742266c2f1041c914ba65355a83ae8747b05f208319784083583494b4b" +checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "petgraph" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", "indexmap", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version", +] + [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.18", ] [[package]] @@ -1639,22 +1726,24 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "polling" -version = "2.5.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", + "bitflags", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] @@ -1682,9 +1771,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "0.3.18" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bdd679d533107e090c2704a35982fc06302e30898e63ffa26a81155c012e92" +checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" [[package]] name = "ppv-lite86" @@ -1692,6 +1781,36 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "predicates" +version = "2.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +dependencies = [ + "difflib", + "float-cmp", + "itertools", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" + +[[package]] +name = "predicates-tree" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -1701,7 +1820,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -1718,9 +1837,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -1746,9 +1865,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.21" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2", ] @@ -1789,7 +1908,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", ] [[package]] @@ -1801,11 +1920,20 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "regex" -version = "1.7.0" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick", "memchr", @@ -1814,18 +1942,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "ring" @@ -1844,11 +1963,11 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.2.8" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65af18d50f789e74aaf23bbb3f65dcd22a3cb6e029b5bced149f6bd57c5c2a2" +checksum = 
"79abed428d1fd2a128201cec72c5f6938e2da607c6f3745f769fabea399d950a" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] @@ -1875,9 +1994,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" @@ -1890,36 +2009,44 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.5" +version = "0.37.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3807b5d10909833d3e9acd1eb5fb988f79376ff10fce42937de71a449c4c588" +checksum = "b96e891d04aa506a6d1f318d2771bcb1c7dfda84e126660ace067c9b474bb2c0" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ - "base64 0.13.1", "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64", +] + [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "salsa20" @@ -1939,9 +2066,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -1949,35 +2076,41 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.150" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.150" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.18", ] [[package]] name = 
"serde_json" -version = "1.0.89" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "itoa", "ryu", @@ -1992,7 +2125,7 @@ checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -2001,15 +2134,26 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -2021,11 +2165,17 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -2044,18 +2194,18 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -2068,14 +2218,24 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "spake2" version = "0.3.1" @@ -2106,12 +2266,6 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" -[[package]] 
-name = "strsim" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" - [[package]] name = "strsim" version = "0.10.0" @@ -2120,16 +2274,16 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "stun_codec" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f1df6c4e592afc1ffed8f4bc50ab9f4d70cfccb42829662b7d276247cbef3b1" +checksum = "4089f66744a63bc909eed6ece965b493030ca896f21c24d9f26c659926c7e05b" dependencies = [ "bytecodec", "byteorder", "crc", "hmac-sha1", "md5", - "trackable 1.2.0", + "trackable 1.3.0", ] [[package]] @@ -2140,9 +2294,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.105" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -2150,59 +2304,65 @@ dependencies = [ ] [[package]] -name = "synstructure" -version = "0.12.6" +name = "syn" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" dependencies = [ "proc-macro2", "quote", - "syn", - "unicode-xid", + "unicode-ident", +] + +[[package]] +name = "tar" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6" +dependencies = [ + "filetime", + "libc", + "xattr", ] [[package]] name = "tempfile" -version = "3.3.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "redox_syscall 0.3.5", + "rustix", + "windows-sys 0.48.0", ] [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "terminal_size" -version = "0.1.17" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" +checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237" dependencies = [ - "libc", - "winapi", + "rustix", + "windows-sys 0.48.0", ] [[package]] -name = "terminal_size" -version = "0.2.3" +name = "termtree" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb20089a8ba2b69debd491f8d2d023761cbf196e999218c591fa1e7e15a21907" -dependencies = [ - "rustix", - "windows-sys", -] +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "textwrap" @@ -2210,43 +2370,44 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" dependencies = [ - "terminal_size 0.2.3", + "terminal_size", ] [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.18", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] [[package]] name = "time" -version = "0.3.17" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" dependencies = [ "itoa", "serde", @@ -2256,15 +2417,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -2280,9 +2441,9 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tracing" @@ -2297,9 +2458,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -2317,9 +2478,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "sharded-slab", "thread_local", @@ -2332,15 +2493,15 @@ version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98abb9e7300b9ac902cc04920945a874c1973e08c310627cc4458c04b70dd32" dependencies = [ - "trackable 1.2.0", + "trackable 1.3.0", "trackable_derive", ] 
[[package]] name = "trackable" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017e2a1a93718e4e8386d037cfb8add78f1d690467f4350fb582f55af1203167" +checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" dependencies = [ "trackable_derive", ] @@ -2352,7 +2513,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2371,18 +2532,18 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.17.3" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" +checksum = "15fba1a6d6bb030745759a9a2a588bfe8490fc8b4751a277db3a0be1c9ebbf67" dependencies = [ - "base64 0.13.1", "byteorder", "bytes", + "data-encoding", "http", "httparse", "log", "rand", - "sha-1", + "sha1 0.10.5", "thiserror", "url", "utf-8", @@ -2396,15 +2557,15 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" [[package]] name = "unicode-normalization" @@ -2421,12 +2582,6 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" -[[package]] -name = "unicode-xid" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" - [[package]] name = "universal-hash" version = "0.4.1" @@ -2445,9 +2600,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", @@ -2469,13 +2624,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.0.0-alpha.9" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] +checksum = "a4d330786735ea358f3bc09eea4caa098569c1c93f342d9aca0514915022fe7e" [[package]] name = "version_check" @@ -2503,9 +2654,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = 
"7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2513,24 +2664,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.18", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -2540,9 +2691,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2550,22 +2701,37 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.18", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" + +[[package]] +name = "wasm-timer" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.11.2", + "pin-utils", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] [[package]] name = "wayland-client" @@ -2628,9 +2794,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -2638,9 +2804,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", @@ -2648,22 +2814,13 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.21.1" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" 
dependencies = [ "webpki", ] -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - [[package]] name = "winapi" version = "0.3.9" @@ -2706,60 +2863,135 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" -version = "0.42.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows-targets 0.42.2", ] +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.0" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "wl-clipboard-rs" @@ -2798,17 +3030,37 @@ dependencies = [ "magic-wormhole", "number_prefix", "qr2term", + "rand", "serde", "serde_derive", "serde_json", "url", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version", + "send_wrapper", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "x11-clipboard" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0827f86aa910c4e73329a4f619deabe88ebb4b042370bf023c2d5d8b4eb54695" +checksum = "980b9aa9226c3b7de8e2adb11bf20124327c054e0e5812d2aac0b5b5a87e7464" dependencies = [ "x11rb", ] @@ -2857,9 +3109,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.4" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c" [[package]] name = "xsalsa20poly1305" @@ -2886,12 +3138,41 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.18", +] + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.8+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +dependencies = [ + "cc", + "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 759b6043..3b4af100 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ repository = "https://github.com/magic-wormhole/magic-wormhole.rs" documentation = "https://docs.rs/magic-wormhole/latest/" license = "EUPL-1.2" edition = "2021" -rust-version = "1.61" # MSRV (also change in CI) +rust-version = "1.66" # MSRV (also change in CI) [dependencies] dialoguer = { version = "0.10", features = ["completion"] } @@ -27,53 +27,71 @@ sha-1 = "0.10.0" sha2 = "0.10.0" hkdf = "0.12.2" hex = { version = "0.4.2", features = ["serde"] } -rand = "0.8.3" +rand = "0.8.0" log = "0.4.13" # zeroize = { version = "1.2.0", features = ["zeroize_derive"] } -base64 = "0.20.0" -futures_ringbuf = "0.3.1" +base64 = "0.21.0" +futures_ringbuf = "0.4.0" +async-trait = "0.1.57" +mockall_double = "0.3.0" time = { version = "0.3.7", features = ["formatting"] } +instant = { version = "0.1.12", features = ["wasm-bindgen"] } derive_more = { version = "0.99.0", default-features = false, features = ["display", "deref", "from"] } thiserror = "1.0.24" futures = "0.3.12" -async-std = { version = "1.12.0", features = ["attributes", "unstable"] } -async-tungstenite = { version = "0.17.1", features = ["async-std-runtime", "async-tls"] } -async-io = "1.6.0" -libc = "0.2.101" url = { version = "2.2.2", features = ["serde"] } percent-encoding = { version = "2.1.0" } # Transit dependencies -socket2 = { version = "0.4.1", optional = true } -stun_codec = { version = "0.2.0", optional = true } -if-addrs = { version = "0.7.0", optional = true } +stun_codec = { version = "0.3.0", optional = true } bytecodec = { version = "0.4.15", optional = true } -async-trait = { version = "0.1.57", optional = true } noise-protocol = { version = "0.1.4", optional = true } noise-rust-crypto = { version = "0.5.0", optional = true } # Transfer dependencies -async-tar = { version = "0.4.2", optional = true } rmp-serde = { version = "1.0.0", optional = true } +tar = { version = "0.4.33", optional = true } # Forwarding dependencies # rmp-serde = … # defined above +[target.'cfg(not(target_family = "wasm"))'.dependencies] +libc = "0.2.101" +async-std = { version = "1.12.0", features = ["attributes", "unstable"] } +async-tungstenite = { version = "0.22.2", features = ["async-std-runtime", "async-tls"] } +async-io = "1.6.0" + +# Transit +socket2 = { version = "0.5.0", optional = true, features = ["all"] } +if-addrs = { version = "0.10.0", optional = true } + +# Transfer + 
+async-tar = { version = "0.4", optional = true } +zstd = { version = "0.11.1", optional = true } + +[target.'cfg(target_family = "wasm")'.dependencies] +wasm-timer = "0.2.5" +ws_stream_wasm = "0.7.3" +getrandom = { version = "0.2.5", features = ["js"] } + # for some tests [dev-dependencies] env_logger = "0.10.0" eyre = "0.6.5" +mockall = "0.11.4" [features] -transit = ["socket2", "stun_codec", "if-addrs", "bytecodec", "async-trait", "noise-protocol", "noise-rust-crypto"] -transfer = ["transit", "async-tar", "rmp-serde"] +transit = ["socket2", "stun_codec", "if-addrs", "bytecodec", "noise-protocol", "noise-rust-crypto"] +transfer = ["transit", "tar", "async-tar", "rmp-serde", "zstd"] +dilation = ["transit"] forwarding = ["transit", "rmp-serde"] -default = ["transit", "transfer"] +default = ["transfer", "dilation"] all = ["default", "forwarding"] [profile.release] diff --git a/README.md b/README.md index 252c37c4..84f4068e 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ If you don't fear touching code and want to contribute, `./src/lib.rs`, `./src/t ## Applications using Wormhole Rust as library - [Warp](https://gitlab.gnome.org/World/warp), a GUI client using Gtk +- [Wormhole File Transfer](https://gitlab.com/lukas-heiligenbrunner/wormhole), a Android client using Flutter (feel free to add yours) diff --git a/changelog.md b/changelog.md index 14a983e6..c5978341 100644 --- a/changelog.md +++ b/changelog.md @@ -2,6 +2,9 @@ ## Unreleased +- Added compilation support for WASM targets. +- \[lib\]\[breaking\] replaced `transit::TransitInfo` with a struct containing the address, the old enum has been renamed to `transit::ConnectionType`. + ## Version 0.6.0 - Add shell completion support for the CLI diff --git a/cli/Cargo.toml b/cli/Cargo.toml index f00a6837..200b3d28 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -11,6 +11,7 @@ log = "0.4.13" url = { version = "2.2.2", features = ["serde"] } futures = "0.3.12" async-std = { version = "1.12.0", features = ["attributes", "unstable"] } +rand = "0.8.3" # CLI specific dependencies magic-wormhole = { path = "..", features = ["all"] } diff --git a/cli/src/main.rs b/cli/src/main.rs index 0f3fa519..b5740c3d 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,12 +1,9 @@ #![allow(clippy::too_many_arguments)] mod util; -use std::{ - ops::Deref, - time::{Duration, Instant}, -}; +use std::time::{Duration, Instant}; -use async_std::{fs::OpenOptions, sync::Arc}; +use async_std::sync::Arc; use clap::{Args, CommandFactory, Parser, Subcommand}; use cli_clipboard::{ClipboardContext, ClipboardProvider}; use color_eyre::{eyre, eyre::Context}; @@ -21,6 +18,12 @@ use std::{ path::{Path, PathBuf}, }; +use std::{io::Write, path::PathBuf}; + +use magic_wormhole::{ + dilated_transfer, forwarding, transfer, transit, MailboxConnection, Wormhole, +}; + fn install_ctrlc_handler( ) -> eyre::Result futures::future::BoxFuture<'static, ()> + Clone> { use async_std::sync::{Condvar, Mutex}; @@ -59,14 +62,29 @@ fn install_ctrlc_handler( }) } +// send, receive, +#[derive(Debug, Args)] +struct CommonTransferArgs { + /// Enable dilation + #[clap(long = "with-dilation", alias = "with-dilation")] + with_dilation: bool, +} + // send, send-many #[derive(Debug, Args)] struct CommonSenderArgs { /// Suggest a different name to the receiver to keep the file's actual name secret. + /// Not allowed when sending more than one file. 
#[clap(long = "rename", visible_alias = "name", value_name = "FILE_NAME")] - file_name: Option, - #[clap(index = 1, required = true, value_name = "FILENAME|DIRNAME", value_hint = clap::ValueHint::AnyPath)] - file: PathBuf, + file_name: Option, + #[clap( + index = 1, + required = true, + min_values = 1, + value_name = "FILENAME|DIRNAME", + value_hint = clap::ValueHint::AnyPath, + )] + files: Vec, } // send, send-many, serve @@ -83,9 +101,6 @@ struct CommonLeaderArgs { // receive #[derive(Debug, Args)] struct CommonReceiverArgs { - /// Rename the received file or folder, overriding the name suggested by the sender. - #[clap(long = "rename", visible_alias = "name", value_name = "FILE_NAME")] - file_name: Option, /// Store transferred file or folder in the specified directory. Defaults to $PWD. #[clap(long = "out-dir", value_name = "PATH", default_value = ".", value_hint = clap::ValueHint::DirPath)] file_path: PathBuf, @@ -174,13 +189,14 @@ enum WormholeCommand { mut_arg("help", |a| a.help("Print this help message")), )] Send { - /// The file or directory to send #[clap(flatten)] common: CommonArgs, #[clap(flatten)] common_leader: CommonLeaderArgs, #[clap(flatten)] common_send: CommonSenderArgs, + #[clap(flatten)] + common_transfer: CommonTransferArgs, }, /// Send a file to many recipients. READ HELP PAGE FIRST! #[clap( @@ -223,6 +239,8 @@ enum WormholeCommand { common_follower: CommonFollowerArgs, #[clap(flatten)] common_receiver: CommonReceiverArgs, + #[clap(flatten)] + common_transfer: CommonTransferArgs, }, /// Forward ports from one machine to another #[clap(subcommand)] @@ -290,43 +308,15 @@ async fn main() -> eyre::Result<()> { }) .ok(); - let concat_file_name = |file_path: &Path, file_name: Option<_>| { - // TODO this has gotten out of hand (it ugly) - // The correct solution would be to make `file_name` an Option everywhere and - // move the ".tar" part further down the line. - // The correct correct solution would be to have working file transfer instead - // of sending stupid archives. - file_name - .map(std::ffi::OsString::from) - .or_else(|| { - let mut name = file_path.file_name().map(std::ffi::OsString::from); - if file_path.is_dir() { - name = name.map(|mut name| { - name.push(".tar"); - name - }); - } - name - }) - .ok_or_else(|| { - eyre::format_err!("You can't send a file without a name. Maybe try --rename") - }) - }; - match app.command { WormholeCommand::Send { common, common_leader: CommonLeaderArgs { code, code_length }, - common_send: - CommonSenderArgs { - file_name, - file: file_path, - }, + common_send: CommonSenderArgs { file_name, files }, + common_transfer: CommonTransferArgs { with_dilation: _ }, .. } => { - let file_name = concat_file_name(&file_path, file_name.as_ref())?; - - eyre::ensure!(file_path.exists(), "{} does not exist", file_path.display()); + let offer = make_send_offer(files, file_name).await?; let transit_abilities = parse_transit_args(&common); let (wormhole, _code, relay_hints) = match util::cancellable( @@ -351,19 +341,19 @@ async fn main() -> eyre::Result<()> { Box::pin(send( wormhole, relay_hints, - file_path.as_ref(), - &file_name, + offer, transit_abilities, ctrl_c.clone(), )) .await?; }, + #[allow(unused_variables)] WormholeCommand::SendMany { tries, timeout, common, common_leader: CommonLeaderArgs { code, code_length }, - common_send: CommonSenderArgs { file_name, file }, + common_send: CommonSenderArgs { file_name, files }, .. 
} => { let transit_abilities = parse_transit_args(&common); @@ -385,13 +375,11 @@ async fn main() -> eyre::Result<()> { }; let timeout = Duration::from_secs(timeout * 60); - let file_name = concat_file_name(&file, file_name.as_ref())?; - Box::pin(send_many( relay_hints, &code, - file.as_ref(), - &file_name, + files, + file_name, tries, timeout, wormhole, @@ -405,22 +393,32 @@ async fn main() -> eyre::Result<()> { noconfirm, common, common_follower: CommonFollowerArgs { code }, - common_receiver: - CommonReceiverArgs { - file_name, - file_path, - }, + common_receiver: CommonReceiverArgs { file_path }, + common_transfer: CommonTransferArgs { with_dilation }, .. } => { + if with_dilation { + log::warn!("The dilation feature is still work in progress. Please remove the `--with-dilation` argument to avoid this."); + } + let transit_abilities = parse_transit_args(&common); let (wormhole, _code, relay_hints) = { + let app_config = dilated_transfer::APP_CONFIG.with_dilation(with_dilation); + let app_config = if with_dilation { + app_config.app_version(dilated_transfer::AppVersion::new(Some( + dilated_transfer::FileTransferV2Mode::Receive, + ))) + } else { + app_config + }; + let connect_fut = Box::pin(parse_and_connect( &mut term, common, code, None, false, - transfer::APP_CONFIG, + app_config, None, clipboard.as_mut(), )); @@ -430,16 +428,21 @@ async fn main() -> eyre::Result<()> { } }; - Box::pin(receive( - wormhole, - relay_hints, - file_path.as_os_str(), - file_name.map(std::ffi::OsString::from).as_deref(), - noconfirm, - transit_abilities, - ctrl_c, - )) - .await?; + if with_dilation && peer_allows_dilation(&wormhole.peer_version()) { + log::debug!("dilate wormhole"); + let mut dilated_wormhole = wormhole.dilate()?; // need to pass transit relay URL + dilated_wormhole.run().await; + } else { + Box::pin(receive( + wormhole, + relay_hints, + &file_path, + noconfirm, + transit_abilities, + ctrl_c, + )) + .await?; + } }, WormholeCommand::Forward(ForwardCommand::Serve { targets, @@ -592,6 +595,11 @@ async fn main() -> eyre::Result<()> { Ok(()) } +fn peer_allows_dilation(_version: &serde_json::Value) -> bool { + // TODO needs to be implemented + true +} + fn parse_transit_args(args: &CommonArgs) -> transit::Abilities { match (args.force_direct, args.force_relay) { (false, false) => transit::Abilities::ALL_ABILITIES, @@ -647,7 +655,7 @@ async fn parse_and_connect( uri_rendezvous = Some(rendezvous_server.clone()); app_config = app_config.rendezvous_url(rendezvous_server.to_string().into()); } - let (wormhole, code) = match code { + let mailbox_connection = match code { Some(code) => { if is_send { print_code.expect("`print_code` must be `Some` when `is_send` is `true`")( @@ -656,21 +664,16 @@ async fn parse_and_connect( &uri_rendezvous, )?; } - let (server_welcome, wormhole) = - magic_wormhole::Wormhole::connect_with_code(app_config, code).await?; - print_welcome(term, &server_welcome)?; - (wormhole, server_welcome.code) + MailboxConnection::connect(app_config, code, true).await? 
}, None => { - let numwords = code_length.unwrap(); + let mailbox_connection = + MailboxConnection::create(app_config, code_length.unwrap()).await?; - let (server_welcome, connector) = - magic_wormhole::Wormhole::connect_without_code(app_config, numwords).await?; - print_welcome(term, &server_welcome)?; /* Print code and also copy it to clipboard */ if is_send { if let Some(clipboard) = clipboard { - match clipboard.set_contents(server_welcome.code.to_string()) { + match clipboard.set_contents(mailbox_connection.code.to_string()) { Ok(()) => log::info!("Code copied to clipboard"), Err(err) => log::warn!("Failed to copy code to clipboard: {}", err), } @@ -678,17 +681,68 @@ async fn parse_and_connect( print_code.expect("`print_code` must be `Some` when `is_send` is `true`")( term, - &server_welcome.code, + &mailbox_connection.code, &uri_rendezvous, )?; } - let wormhole = connector.await?; - (wormhole, server_welcome.code) + mailbox_connection }, }; + print_welcome(term, &mailbox_connection.welcome)?; + let code = mailbox_connection.code.clone(); + let wormhole = Wormhole::connect(mailbox_connection).await?; eyre::Result::<_>::Ok((wormhole, code, relay_hints)) } +async fn make_send_offer( + mut files: Vec, + file_name: Option, +) -> eyre::Result { + for file in &files { + eyre::ensure!( + async_std::path::Path::new(&file).exists().await, + "{} does not exist", + file.display() + ); + } + log::trace!("Making send offer in {files:?}, with name {file_name:?}"); + + match (files.len(), file_name) { + (0, _) => unreachable!("Already checked by CLI parser"), + (1, Some(file_name)) => { + let file = files.remove(0); + Ok(transfer::OfferSend::new_file_or_folder(file_name, file).await?) + }, + (1, None) => { + let file = files.remove(0); + let file_name = file + .file_name() + .ok_or_else(|| { + eyre::format_err!("You can't send a file without a name. Maybe try --rename") + })? + .to_str() + .ok_or_else(|| eyre::format_err!("File path must be a valid UTF-8 string"))? + .to_owned(); + Ok(transfer::OfferSend::new_file_or_folder(file_name, file).await?) + }, + (_, Some(_)) => Err(eyre::format_err!( + "Can't customize file name when sending multiple files" + )), + (_, None) => { + let mut names = std::collections::BTreeMap::new(); + for path in &files { + eyre::ensure!(path.file_name().is_some(), "'{}' has no name. You need to send it separately and use the --rename flag, or rename it on the file system", path.display()); + if let Some(old) = names.insert(path.file_name(), path) { + eyre::bail!( + "'{}' and '{}' have the same file name. Rename one of them on disk, or send them in separate transfers", old.display(), path.display(), + ); + } + } + Ok(transfer::OfferSend::new_paths(files).await?) 
+ }, + } +} + fn create_progress_bar(file_size: u64) -> ProgressBar { use indicatif::ProgressStyle; @@ -703,6 +757,17 @@ fn create_progress_bar(file_size: u64) -> ProgressBar { pb } +fn create_progress_handler(pb: ProgressBar) -> impl FnMut(u64, u64) { + move |sent, total| { + if sent == 0 { + pb.reset_elapsed(); + pb.set_length(total); + pb.enable_steady_tick(std::time::Duration::from_millis(250)); + } + pb.set_position(sent); + } +} + fn enter_code() -> eyre::Result { let completion = PgpWordList::default(); let input = Input::new() @@ -714,8 +779,8 @@ fn enter_code() -> eyre::Result { input } -fn print_welcome(term: &mut Term, welcome: &magic_wormhole::WormholeWelcome) -> eyre::Result<()> { - if let Some(welcome) = &welcome.welcome { +fn print_welcome(term: &mut Term, welcome: &Option) -> eyre::Result<()> { + if let Some(welcome) = &welcome { writeln!(term, "Got welcome from server: {}", welcome)?; } Ok(()) @@ -779,28 +844,19 @@ fn server_print_code( async fn send( wormhole: Wormhole, relay_hints: Vec, - file_path: &std::ffi::OsStr, - file_name: &std::ffi::OsStr, + offer: transfer::OfferSend, transit_abilities: transit::Abilities, ctrl_c: impl Fn() -> futures::future::BoxFuture<'static, ()>, ) -> eyre::Result<()> { let pb = create_progress_bar(0); let pb2 = pb.clone(); - transfer::send_file_or_folder( + transfer::send( wormhole, relay_hints, - file_path, - file_name, transit_abilities, + offer, &transit::log_transit_connection, - move |sent, total| { - if sent == 0 { - pb.reset_elapsed(); - pb.set_length(total); - pb.enable_steady_tick(std::time::Duration::from_millis(250)); - } - pb.set_position(sent); - }, + create_progress_handler(pb), ctrl_c(), ) .await @@ -812,8 +868,8 @@ async fn send( async fn send_many( relay_hints: Vec, code: &magic_wormhole::Code, - file_path: &std::ffi::OsStr, - file_name: &std::ffi::OsStr, + files: Vec, + file_name: Option, max_tries: u64, timeout: Duration, wormhole: Wormhole, @@ -828,19 +884,12 @@ async fn send_many( * for us at the moment, so we'll have to do without for now. 
*/ let mp = MultiProgress::new(); - - let file_path = Arc::new(file_path.to_owned()); - let file_name = Arc::new(file_name.to_owned()); - // TODO go back to reference counting again - //let url = Arc::new(relay_server); - let time = Instant::now(); /* Special-case the first send with reusing the existing connection */ send_in_background( relay_hints.clone(), - Arc::clone(&file_path), - Arc::clone(&file_name), + make_send_offer(files.clone(), file_name.clone()).await?, wormhole, term.clone(), &mp, @@ -862,12 +911,13 @@ async fn send_many( break; } - let (_server_welcome, wormhole) = - magic_wormhole::Wormhole::connect_with_code(transfer::APP_CONFIG, code.clone()).await?; + let wormhole = Wormhole::connect( + MailboxConnection::connect(transfer::APP_CONFIG, code.clone(), false).await?, + ) + .await?; send_in_background( relay_hints.clone(), - Arc::clone(&file_path), - Arc::clone(&file_name), + make_send_offer(files.clone(), file_name.clone()).await?, wormhole, term.clone(), &mp, @@ -879,8 +929,7 @@ async fn send_many( async fn send_in_background( relay_hints: Vec, - file_name: Arc, - file_path: Arc, + offer: transfer::OfferSend, wormhole: Wormhole, mut term: Term, mp: &MultiProgress, @@ -893,21 +942,13 @@ async fn send_many( async_std::task::spawn(async move { let pb2 = pb.clone(); let result = async move { - transfer::send_file_or_folder( + transfer::send( wormhole, relay_hints, - file_path.deref(), - file_name.deref(), transit_abilities, + offer, &transit::log_transit_connection, - move |sent, total| { - if sent == 0 { - pb2.reset_elapsed(); - pb2.set_length(total); - pb2.enable_steady_tick(std::time::Duration::from_millis(250)); - } - pb2.set_position(sent); - }, + create_progress_handler(pb2), cancel, ) .await?; @@ -933,20 +974,33 @@ async fn send_many( async fn receive( wormhole: Wormhole, relay_hints: Vec, - target_dir: &std::ffi::OsStr, - file_name: Option<&std::ffi::OsStr>, + target_dir: &std::path::Path, noconfirm: bool, transit_abilities: transit::Abilities, ctrl_c: impl Fn() -> futures::future::BoxFuture<'static, ()>, ) -> eyre::Result<()> { - let req = transfer::request_file(wormhole, relay_hints, transit_abilities, ctrl_c()) + let req = transfer::request(wormhole, relay_hints, transit_abilities, ctrl_c()) .await .context("Could not get an offer")?; /* If None, the task got cancelled */ - let req = match req { - Some(req) => req, - None => return Ok(()), - }; + match req { + Some(transfer::ReceiveRequest::V1(req)) => { + receive_inner_v1(req, target_dir, noconfirm, ctrl_c).await + }, + Some(transfer::ReceiveRequest::V2(req)) => { + receive_inner_v2(req, target_dir, noconfirm, ctrl_c).await + }, + None => Ok(()), + } +} + +async fn receive_inner_v1( + req: transfer::ReceiveRequestV1, + target_dir: &std::path::Path, + noconfirm: bool, + ctrl_c: impl Fn() -> futures::future::BoxFuture<'static, ()>, +) -> eyre::Result<()> { + use async_std::fs::OpenOptions; /* * Control flow is a bit tricky here: @@ -961,7 +1015,7 @@ async fn receive( || util::ask_user( format!( "Receive file '{}' ({})?", - req.filename.display(), + req.filename, match NumberPrefix::binary(req.filesize as f64) { NumberPrefix::Standalone(bytes) => format!("{} bytes", bytes), NumberPrefix::Prefixed(prefix, n) => @@ -975,17 +1029,11 @@ async fn receive( return req.reject().await.context("Could not reject offer"); } - let file_name = file_name - .or_else(|| req.filename.file_name()) - .ok_or_else(|| eyre::format_err!("The sender did not specify a valid file name, and neither did you. 
Try using --rename."))?; - let file_path = Path::new(target_dir).join(file_name); + // TODO validate untrusted input here + let file_path = std::path::Path::new(target_dir).join(&req.filename); let pb = create_progress_bar(req.filesize); - let on_progress = move |received, _total| { - pb.set_position(received); - }; - /* Then, accept if the file exists */ if !file_path.exists() || noconfirm { let mut file = OpenOptions::new() @@ -997,8 +1045,8 @@ async fn receive( return req .accept( &transit::log_transit_connection, - on_progress, &mut file, + create_progress_handler(pb), ctrl_c(), ) .await @@ -1024,14 +1072,116 @@ async fn receive( req .accept( &transit::log_transit_connection, - on_progress, &mut file, + create_progress_handler(pb), ctrl_c(), ) .await .context("Receive process failed") } +async fn receive_inner_v2( + req: transfer::ReceiveRequestV2, + target_dir: &std::path::Path, + noconfirm: bool, + ctrl_c: impl Fn() -> futures::future::BoxFuture<'static, ()>, +) -> eyre::Result<()> { + let offer = req.offer(); + let file_size = offer.total_size(); + let offer_name = offer.offer_name(); + + use number_prefix::NumberPrefix; + if !(noconfirm + || util::ask_user( + format!( + "Receive {} ({})?", + offer_name, + match NumberPrefix::binary(file_size as f64) { + NumberPrefix::Standalone(bytes) => format!("{} bytes", bytes), + NumberPrefix::Prefixed(prefix, n) => + format!("{:.1} {}B in size", n, prefix.symbol()), + }, + ), + true, + ) + .await) + { + return req.reject().await.context("Could not reject offer"); + } + + let pb = create_progress_bar(file_size); + + let on_progress = move |received, _total| { + pb.set_position(received); + }; + + /* Create a temporary directory for receiving */ + use rand::Rng; + let tmp_dir = target_dir.join(&format!( + "wormhole-tmp-{:06}", + rand::thread_rng().gen_range(0..1_000_000) + )); + async_std::fs::create_dir_all(&tmp_dir) + .await + .context("Failed to create temporary directory for receiving")?; + + /* Prepare the receive by creating all directories */ + offer.create_directories(&tmp_dir).await?; + + /* Accept the offer and receive it */ + let answer = offer.accept_all(&tmp_dir); + req.accept( + &transit::log_transit_connection, + answer, + on_progress, + ctrl_c(), + ) + .await + .context("Receive process failed")?; + + // /* Put in all the symlinks last, this greatly reduces the attack surface */ + // offer.create_symlinks(&tmp_dir).await?; + + /* TODO walk the output directory and delete things we did not accept; this will be important for resumption */ + + /* Move the received files to their target location */ + use futures::TryStreamExt; + async_std::fs::read_dir(&tmp_dir) + .await? 
+ .map_err(Into::into) + .and_then(|file| { + let tmp_dir = tmp_dir.clone(); + async move { + let path = file.path(); + let name = path.file_name().expect("Internal error: this should never happen"); + let target_path = target_dir.join(name); + + /* This suffers some TOCTTOU, sorry about that: https://internals.rust-lang.org/t/rename-file-without-overriding-existing-target/17637 */ + if async_std::path::Path::new(&target_path).exists().await { + eyre::bail!( + "Target destination {} exists, you can manually extract the file from {}", + target_path.display(), + tmp_dir.display(), + ); + } else { + async_std::fs::rename(&path, &target_path).await?; + } + Ok(()) + }}) + .try_collect::<()>() + .await?; + + /* Delete the temporary directory */ + async_std::fs::remove_dir_all(&tmp_dir) + .await + .context(format!( + "Failed to delete {}, please do it manually", + tmp_dir.display() + ))?; + + Ok(()) +} + #[cfg(test)] mod test { use super::*; diff --git a/src/core.rs b/src/core.rs index 9b55be04..7b50900a 100644 --- a/src/core.rs +++ b/src/core.rs @@ -1,18 +1,24 @@ -pub(super) mod key; -pub mod rendezvous; -mod server_messages; -#[cfg(test)] -mod test; -pub mod wordlist; +use std::{any::Any, borrow::Cow}; +use crate::core::protocol::{WormholeProtocol, WormholeProtocolDefault}; +#[cfg(feature = "dilation")] +use crate::dilation::DilatedWormhole; +use log::*; +use serde; use serde_derive::{Deserialize, Serialize}; -use std::borrow::Cow; +use serde_json::Value; +use xsalsa20poly1305 as secretbox; use self::rendezvous::*; pub(self) use self::server_messages::EncryptedMessage; -use log::*; -use xsalsa20poly1305 as secretbox; +pub(super) mod key; +pub(crate) mod protocol; +pub mod rendezvous; +mod server_messages; +#[cfg(test)] +pub(crate) mod test; +mod wordlist; #[derive(Debug, thiserror::Error)] #[non_exhaustive] @@ -42,6 +48,10 @@ pub enum WormholeError { PakeFailed, #[error("Cannot decrypt a received message")] Crypto, + #[error("Nameplate is unclaimed: {}", _0)] + UnclaimedNameplate(Nameplate), + #[error("Dilation version mismatch")] + DilationVersion, } impl WormholeError { @@ -61,6 +71,10 @@ impl From for WormholeError { * The result of the client-server handshake */ #[derive(Clone, Debug, PartialEq, Eq)] +#[deprecated( + since = "0.7.0", + note = "part of the response of `Wormhole::connect_without_code(...)` and `Wormhole::connect_with_code(...) please use 'MailboxConnection::create(...)`/`MailboxConnection::connect(..)` and `Wormhole::connect(mailbox_connection)' instead" +)] pub struct WormholeWelcome { /** A welcome message from the server (think of "message of the day"). Should be displayed to the user if present. */ pub welcome: Option, @@ -73,9 +87,8 @@ pub struct WormholeWelcome { * You can send and receive arbitrary messages in form of byte slices over it, using [`Wormhole::send`] and [`Wormhole::receive`]. * Everything else (including encryption) will be handled for you. * - * To create a wormhole, use the [`Wormhole::connect_without_code`], [`Wormhole::connect_with_code`] etc. methods, depending on - * which values you have. Typically, the sender side connects without a code (which will create one), and the receiver side - * has one (the user entered it, who got it from the sender). + * To create a wormhole, use the mailbox connection created via [`MailboxConnection::create`] or [`MailboxConnection::connect*`] with the [`Wormhole::connect`] method. 
+ * Typically, the sender side connects without a code (which will create one), and the receiver side has one (the user entered it, who got it from the sender). * * # Clean shutdown * @@ -85,38 +98,162 @@ pub struct WormholeWelcome { * Maybe a better way to handle application level protocols is to create a trait for them and then * to paramterize over them. */ + +/// A `MailboxConnection` contains a `RendezvousServer` which is connected to the mailbox +pub struct MailboxConnection { + /// A copy of `AppConfig`, + config: AppConfig, + /// The `RendezvousServer` with an open mailbox connection + server: RendezvousServer, + /// The welcome message received from the mailbox server + pub welcome: Option, + /// The mailbox id of the created mailbox + pub mailbox: Mailbox, + /// The Code which is required to connect to the mailbox. + pub code: Code, +} + +impl MailboxConnection { + /// Create a connection to a mailbox which is configured with a `Code` starting with the nameplate and by a given number of wordlist based random words. + /// + /// # Arguments + /// + /// * `config`: Application configuration + /// * `code_length`: number of words used for the password. The words are taken from the default wordlist. + /// + /// # Examples + /// + /// ```no_run + /// # fn main() -> eyre::Result<()> { async_std::task::block_on(async { + /// use magic_wormhole::{transfer::APP_CONFIG, AppConfig, MailboxConnection}; + /// let config = APP_CONFIG; + /// let mailbox_connection = MailboxConnection::create(config, 2).await?; + /// # Ok(()) })} + /// ``` + pub async fn create(config: AppConfig, code_length: usize) -> Result { + Self::create_with_password( + config, + &wordlist::default_wordlist(code_length).choose_words(), + ) + .await + } + + /// Create a connection to a mailbox which is configured with a `Code` containing the nameplate and the given password. + /// + /// # Arguments + /// + /// * `config`: Application configuration + /// * `password`: Free text password which will be appended to the nameplate number to form the `Code` + /// + /// # Examples + /// + /// ```no_run + /// # fn main() -> eyre::Result<()> { async_std::task::block_on(async { + /// use magic_wormhole::{transfer::APP_CONFIG, MailboxConnection}; + /// let config = APP_CONFIG; + /// let mailbox_connection = MailboxConnection::create_with_password(config, "secret").await?; + /// # Ok(()) })} + /// ``` + pub async fn create_with_password( + config: AppConfig, + password: &str, + ) -> Result { + let (mut server, welcome) = + RendezvousServer::connect(&config.id, &config.rendezvous_url).await?; + let (nameplate, mailbox) = server.allocate_claim_open().await?; + let code = Code::new(&nameplate, &password); + + Ok(MailboxConnection { + config, + server, + mailbox, + code, + welcome, + }) + } + + /// Create a connection to a mailbox defined by a `Code` which contains the `Nameplate` and the password to authorize the access. + /// + /// # Arguments + /// + /// * `config`: Application configuration + /// * `code`: The `Code` required to authorize to connect to an existing mailbox. + /// * `allocate`: `true`: Allocates a `Nameplate` if it does not exist. + /// `false`: The call fails with a `WormholeError::UnclaimedNameplate` when the `Nameplate` does not exist. 
+ /// + /// # Examples + /// + /// ```no_run + /// # fn main() -> eyre::Result<()> { async_std::task::block_on(async { + /// use magic_wormhole::{transfer::APP_CONFIG, Code, MailboxConnection, Nameplate}; + /// let config = APP_CONFIG; + /// let code = Code::new(&Nameplate::new("5"), "password"); + /// let mut mailbox_connection = MailboxConnection::connect(config, code, false).await?; + /// # Ok(()) })} + /// ``` + pub async fn connect( + config: AppConfig, + code: Code, + allocate: bool, + ) -> Result { + let (mut server, welcome) = + RendezvousServer::connect(&config.id, &config.rendezvous_url).await?; + let nameplate = code.nameplate(); + if !allocate { + let nameplates = server.list_nameplates().await?; + if !nameplates.contains(&nameplate) { + server.shutdown(Mood::Errory).await?; + return Err(WormholeError::UnclaimedNameplate(nameplate)); + } + } + let mailbox = server.claim_open(nameplate).await?; + + Ok(MailboxConnection { + config, + server, + mailbox, + code, + welcome, + }) + } + + /// Shut down the connection to the mailbox + /// + /// # Arguments + /// + /// * `mood`: `Mood` should give a hint of the reason of the shutdown + /// + /// # Examples + /// + /// ``` + /// # fn main() -> eyre::Result<()> { use magic_wormhole::WormholeError; + /// async_std::task::block_on(async { + /// use magic_wormhole::{transfer::APP_CONFIG, MailboxConnection, Mood}; + /// let config = APP_CONFIG; + /// let mut mailbox_connection = MailboxConnection::create_with_password(config, "secret") + /// .await?; + /// mailbox_connection.shutdown(Mood::Happy).await?; + /// # Ok(())})} + /// ``` + pub async fn shutdown(&mut self, mood: Mood) -> Result<(), WormholeError> { + self.server + .shutdown(mood) + .await + .map_err(WormholeError::ServerError) + } +} + #[derive(Debug)] pub struct Wormhole { - server: RendezvousServer, - phase: u64, - key: key::Key, - appid: AppID, - /** - * If you're paranoid, let both sides check that they calculated the same verifier. - * - * PAKE hardens a standard key exchange with a password ("password authenticated") in order - * to mitigate potential man in the middle attacks that would otherwise be possible. Since - * the passwords usually are not of hight entropy, there is a low-probability possible of - * an attacker guessing the password correctly, enabling them to MitM the connection. - * - * Not only is that probability low, but they also have only one try per connection and a failed - * attempts will be noticed by both sides. Nevertheless, comparing the verifier mitigates that - * attack vector. - */ - pub verifier: Box, - /** - * Our "app version" information that we sent. See the [`peer_version`] for more information. - */ - pub our_version: Box, - /** - * Protocol version information from the other side. - * This is bound by the [`AppID`]'s protocol and thus shall be handled on a higher level - * (e.g. by the file transfer API). - */ - pub peer_version: serde_json::Value, + protocol: Box, } impl Wormhole { + #[cfg(test)] + pub fn new(protocol: Box) -> Self { + Wormhole { protocol } + } + /** * Generate a code and connect to the rendezvous server. * @@ -126,6 +263,11 @@ impl Wormhole { * do the rest of the client-client handshake and yield the [`Wormhole`] object * on success. */ + #[deprecated( + since = "0.7.0", + note = "please use 'MailboxConnection::create(...) 
and Wormhole::connect(mailbox_connection)' instead" + )] + #[allow(deprecated)] pub async fn connect_without_code( config: AppConfig, code_length: usize, @@ -136,77 +278,56 @@ impl Wormhole { ), WormholeError, > { - let AppConfig { - id: appid, - rendezvous_url, - app_version: versions, - } = config; - let (mut server, welcome) = RendezvousServer::connect(&appid, &rendezvous_url).await?; - let (nameplate, mailbox) = server.allocate_claim_open().await?; - log::debug!("Connected to mailbox {}", mailbox); - - let code = Code::new( - &nameplate, - &wordlist::default_wordlist(code_length).choose_words(), - ); - + let mailbox_connection = MailboxConnection::create(config, code_length).await?; Ok(( WormholeWelcome { - welcome, - code: code.clone(), + welcome: mailbox_connection.welcome.clone(), + code: mailbox_connection.code.clone(), }, - Self::connect_custom(server, appid, code.0, versions), + Self::connect(mailbox_connection), )) } /** * Connect to a peer with a code. */ + #[deprecated( + since = "0.7.0", + note = "please use 'MailboxConnection::connect(...) and Wormhole::connect(mailbox_connection)' instead" + )] + #[allow(deprecated)] pub async fn connect_with_code( config: AppConfig, code: Code, + expect_claimed_nameplate: bool, ) -> Result<(WormholeWelcome, Self), WormholeError> { - let AppConfig { - id: appid, - rendezvous_url, - app_version: versions, - } = config; - let (mut server, welcome) = RendezvousServer::connect(&appid, &rendezvous_url).await?; - - let nameplate = code.nameplate(); - let mailbox = server.claim_open(nameplate).await?; - log::debug!("Connected to mailbox {}", mailbox); - - Ok(( + let mailbox_connection = + MailboxConnection::connect(config, code.clone(), !expect_claimed_nameplate).await?; + return Ok(( WormholeWelcome { - welcome, - code: code.clone(), + welcome: mailbox_connection.welcome.clone(), + code: code, }, - Self::connect_custom(server, appid, code.0, versions).await?, - )) - } - - /** TODO */ - pub async fn connect_with_seed() { - todo!() + Self::connect(mailbox_connection).await?, + )); } - /// Do only the client-client part of the connection setup - /// - /// The rendezvous server must already have an opened mailbox. + /// Set up a Wormhole which is the client-client part of the connection setup /// - /// # Panics - /// - /// If the [`RendezvousServer`] is not properly initialized, i.e. if the - /// mailbox is not open. - pub async fn connect_custom( - mut server: RendezvousServer, - appid: AppID, - password: String, - app_versions: impl serde::Serialize + Send + Sync + 'static, + /// The MailboxConnection already contains a rendezvous server with an opened mailbox. 
+ pub async fn connect( + mailbox_connection: MailboxConnection, ) -> Result { + let MailboxConnection { + config, + mut server, + mailbox: _mailbox, + code, + welcome: _welcome, + } = mailbox_connection; + /* Send PAKE */ - let (pake_state, pake_msg_ser) = key::make_pake(&password, &appid); + let (pake_state, pake_msg_ser) = key::make_pake(&code.0, &config.id); server.send_peer_message(Phase::PAKE, pake_msg_ser).await?; /* Receive PAKE */ @@ -218,7 +339,11 @@ impl Wormhole { /* Send versions message */ let mut versions = key::VersionsMessage::new(); - versions.set_app_versions(serde_json::to_value(&app_versions).unwrap()); + versions.set_app_versions(serde_json::to_value(&config.app_version).unwrap()); + #[cfg(feature = "dilation")] + if config.with_dilation { + versions.enable_dilation(); + } let (version_phase, version_msg) = key::build_version_msg(server.side(), &key, &versions); server.send_peer_message(version_phase, version_msg).await?; let peer_version = server.next_peer_message_some().await?; @@ -241,26 +366,33 @@ impl Wormhole { /* We are now fully initialized! Up and running! :tada: */ Ok(Self { - server, - appid, - phase: 0, - key: key::Key::new(key.into()), - verifier: Box::new(key::derive_verifier(&key)), - our_version: Box::new(app_versions), - peer_version, + protocol: Box::new(WormholeProtocolDefault::new( + server, + config, + key::Key::new(key.into()), + peer_version, + )), }) } - /** Send an encrypted message to peer */ - pub async fn send(&mut self, plaintext: Vec) -> Result<(), WormholeError> { - let phase_string = Phase::numeric(self.phase); - self.phase += 1; - let data_key = key::derive_phase_key(self.server.side(), &self.key, &phase_string); - let (_nonce, encrypted) = key::encrypt_data(&data_key, &plaintext); - self.server - .send_peer_message(phase_string, encrypted) - .await?; - Ok(()) + /** TODO */ + pub async fn connect_with_seed() { + todo!() + } + + /** + * create a dilated wormhole + */ + #[cfg(feature = "dilation")] + pub fn dilate(self) -> Result { + // XXX: create endpoints? + // get versions from the other side and check if they support dilation. + let can_they_dilate = &self.protocol.peer_version()["can-dilate"]; + if !can_they_dilate.is_null() && can_they_dilate[0] != "1" { + return Err(WormholeError::DilationVersion); + } + + Ok(DilatedWormhole::new(self, MySide::generate(8))) } /** @@ -273,33 +405,21 @@ impl Wormhole { * * If the serialization fails */ - pub async fn send_json( + pub async fn send_json( &mut self, message: &T, ) -> Result<(), WormholeError> { - self.send(serde_json::to_vec(message).unwrap()).await - } - - /** Receive an encrypted message from peer */ - pub async fn receive(&mut self) -> Result, WormholeError> { - loop { - let peer_message = match self.server.next_peer_message().await? { - Some(peer_message) => peer_message, - None => continue, - }; - if peer_message.phase.to_num().is_none() { - // TODO: log and ignore, for future expansion - todo!("log and ignore, for future expansion"); - } - - // TODO maybe reorder incoming messages by phase numeral? 
- let decrypted_message = peer_message - .decrypt(&self.key) - .ok_or(WormholeError::Crypto)?; + self.send_json_with_phase(message, Phase::numeric).await + } - // Send to client - return Ok(decrypted_message); - } + pub async fn send_json_with_phase( + &mut self, + message: &T, + phase_provider: PhaseProvider, + ) -> Result<(), WormholeError> { + self.protocol + .send_with_phase(serde_json::to_vec(message).unwrap(), phase_provider) + .await } /** @@ -313,7 +433,7 @@ impl Wormhole { where T: for<'a> serde::Deserialize<'a>, { - self.receive().await.map(|data: Vec| { + self.protocol.receive().await.map(|data: Vec| { serde_json::from_slice(&data).map_err(|e| { log::error!( "Received invalid data from peer: '{}'", @@ -324,9 +444,8 @@ impl Wormhole { }) } - pub async fn close(self) -> Result<(), WormholeError> { - log::debug!("Closing Wormhole…"); - self.server.shutdown(Mood::Happy).await.map_err(Into::into) + pub async fn close(&mut self) -> Result<(), WormholeError> { + self.protocol.close().await } /** @@ -334,7 +453,7 @@ impl Wormhole { * This determines the upper-layer protocol. Only wormholes with the same value can talk to each other. */ pub fn appid(&self) -> &AppID { - &self.appid + self.protocol.appid() } /** @@ -342,13 +461,29 @@ impl Wormhole { * Can be used to derive sub-keys for different purposes. */ pub fn key(&self) -> &key::Key { - &self.key + self.protocol.key() + } + + pub fn peer_version(&self) -> &Value { + self.protocol.peer_version() + } + + pub fn our_version(&self) -> &Box { + &self.protocol.our_version() } } // the serialized forms of these variants are part of the wire protocol, so // they must be spelled exactly as shown -#[derive(Debug, PartialEq, Copy, Clone, Deserialize, Serialize, derive_more::Display)] +#[derive( + Debug, + PartialEq, + Copy, + Clone, + serde_derive::Deserialize, + serde_derive::Serialize, + derive_more::Display, +)] pub enum Mood { #[serde(rename = "happy")] Happy, @@ -362,8 +497,10 @@ pub enum Mood { Unwelcome, } +pub const APPID_RAW: &str = "lothar.com/wormhole/text-or-file-xfer"; + /** - * Wormhole configuration corresponding to an uppler layer protocol + * Wormhole configuration corresponding to an upper layer protocol * * There are multiple different protocols built on top of the core * Wormhole protocol. 
They are identified by a unique URI-like ID string @@ -374,13 +511,14 @@ pub enum Mood { * See [`crate::transfer::APP_CONFIG`], which entails */ #[derive(PartialEq, Eq, Clone, Debug)] -pub struct AppConfig { +pub struct AppConfig { pub id: AppID, pub rendezvous_url: Cow<'static, str>, pub app_version: V, + pub with_dilation: bool, } -impl AppConfig { +impl AppConfig { pub fn id(mut self, id: AppID) -> Self { self.id = id; self @@ -390,9 +528,12 @@ impl AppConfig { self.rendezvous_url = rendezvous_url; self } -} -impl AppConfig { + pub fn with_dilation(mut self, with_dilation: bool) -> Self { + self.with_dilation = with_dilation; + self + } + pub fn app_version(mut self, app_version: V) -> Self { self.app_version = app_version; self @@ -424,17 +565,25 @@ impl From for AppID { // MySide is used for the String that we send in all our outbound messages #[derive( - PartialEq, Eq, Clone, Debug, Deserialize, Serialize, derive_more::Display, derive_more::Deref, + PartialOrd, + PartialEq, + Eq, + Clone, + Debug, + Deserialize, + Serialize, + derive_more::Display, + derive_more::Deref, )] #[serde(transparent)] #[display(fmt = "MySide({})", "&*_0")] pub struct MySide(EitherSide); impl MySide { - pub fn generate() -> MySide { + pub fn generate(length: usize) -> MySide { use rand::{rngs::OsRng, RngCore}; - let mut bytes: [u8; 5] = [0; 5]; + let mut bytes = vec![0; length]; OsRng.fill_bytes(&mut bytes); MySide(EitherSide(hex::encode(bytes))) @@ -450,7 +599,15 @@ impl MySide { // TheirSide is used for the string that arrives inside inbound messages #[derive( - PartialEq, Eq, Clone, Debug, Deserialize, Serialize, derive_more::Display, derive_more::Deref, + PartialOrd, + PartialEq, + Eq, + Clone, + Debug, + Deserialize, + Serialize, + derive_more::Display, + derive_more::Deref, )] #[serde(transparent)] #[display(fmt = "TheirSide({})", "&*_0")] @@ -463,7 +620,15 @@ impl> From for TheirSide { } #[derive( - PartialEq, Eq, Clone, Debug, Deserialize, Serialize, derive_more::Display, derive_more::Deref, + PartialOrd, + PartialEq, + Eq, + Clone, + Debug, + Deserialize, + Serialize, + derive_more::Display, + derive_more::Deref, )] #[serde(transparent)] #[deref(forward)] @@ -476,6 +641,12 @@ impl> From for EitherSide { } } +impl From for TheirSide { + fn from(side: MySide) -> TheirSide { + TheirSide(side.0.into()) + } +} + #[derive(PartialEq, Eq, Clone, Debug, Hash, Deserialize, Serialize, derive_more::Display)] #[serde(transparent)] pub struct Phase(pub Cow<'static, str>); @@ -488,6 +659,10 @@ impl Phase { Phase(phase.to_string().into()) } + pub fn dilation(phase: u64) -> Self { + Phase(format!("dilate-{}", phase.to_string()).to_string().into()) + } + pub fn is_version(&self) -> bool { self == &Self::VERSION } @@ -499,6 +674,8 @@ impl Phase { } } +type PhaseProvider = fn(u64) -> Phase; + #[derive(PartialEq, Eq, Clone, Debug, Deserialize, Serialize, derive_more::Display)] #[serde(transparent)] pub struct Mailbox(pub String); @@ -510,6 +687,7 @@ pub struct Mailbox(pub String); #[deref(forward)] #[display(fmt = "{}", _0)] pub struct Nameplate(pub String); + impl Nameplate { pub fn new(n: &str) -> Self { Nameplate(String::from(n)) @@ -548,3 +726,15 @@ impl Code { Nameplate::new(self.0.split('-').next().unwrap()) } } + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "kebab-case", tag = "type")] +pub enum Ability { + DirectTcpV1, + RelayV1, + RelayV2, + #[cfg(all())] + NoiseCryptoV1, + #[serde(other)] + Other, +} diff --git a/src/core/key.rs b/src/core/key.rs index c6b316ec..431c943a 100644 --- 
a/src/core/key.rs +++ b/src/core/key.rs @@ -102,24 +102,43 @@ pub fn make_pake(password: &str, appid: &AppID) -> (Spake2, Vec, - #[serde(default)] + //#[serde(default)] + pub can_dilate: Option<[Cow<'static, str>; 1]>, + //#[serde(default)] + pub dilation_abilities: Option>, + //#[serde(default)] + #[serde(rename = "app_versions")] pub app_versions: serde_json::Value, // resume: Option, } impl VersionsMessage { pub fn new() -> Self { - Default::default() + // Default::default() + Self { + abilities: vec![], + can_dilate: None, + dilation_abilities: Some(std::borrow::Cow::Borrowed(&[ + Ability::DirectTcpV1, + Ability::RelayV1, + ])), + app_versions: serde_json::Value::Null, + } } pub fn set_app_versions(&mut self, versions: serde_json::Value) { self.app_versions = versions; } + pub fn enable_dilation(&mut self) { + self.can_dilate = Some([std::borrow::Cow::Borrowed("1")]) + } + // pub fn add_resume_ability(&mut self, _resume: ()) { // self.abilities.push("resume-v1".into()) // } @@ -362,4 +381,15 @@ mod test { // None => panic!(), // } // } + + #[test] + fn test_versions_message_can_dilate() { + let mut message = VersionsMessage::new(); + + assert_eq!(message.can_dilate, None); + + message.enable_dilation(); + + assert_eq!(message.can_dilate, Some([std::borrow::Cow::Borrowed("1")])); + } } diff --git a/src/core/protocol.rs b/src/core/protocol.rs new file mode 100644 index 00000000..38c99b21 --- /dev/null +++ b/src/core/protocol.rs @@ -0,0 +1,155 @@ +use async_trait::async_trait; +use std::{any::Any, fmt::Debug}; + +#[cfg(test)] +use mockall::automock; + +use crate::{ + core::{ + key::{derive_phase_key, derive_verifier, encrypt_data}, + Phase, PhaseProvider, + }, + rendezvous::RendezvousServer, + AppConfig, AppID, Key, Mood, WormholeError, WormholeKey, +}; + +#[derive(Debug)] +pub struct WormholeProtocolDefault { + server: RendezvousServer, + phase: u64, + key: Key, + appid: AppID, + /** + * If you're paranoid, let both sides check that they calculated the same verifier. + * + * PAKE hardens a standard key exchange with a password ("password authenticated") in order + * to mitigate potential man in the middle attacks that would otherwise be possible. Since + * the passwords usually are not of high entropy, there is a small possibility of + * an attacker guessing the password correctly, enabling them to MitM the connection. + * + * Not only is that probability low, but they also have only one try per connection and a failed + * attempt will be noticed by both sides. Nevertheless, comparing the verifier mitigates that + * attack vector. + */ + pub verifier: Box, + /** + * Our "app version" information that we sent. See the [`peer_version`] for more information. + */ + pub our_version: Box, + /** + * Protocol version information from the other side. + * This is bound by the [`AppID`]'s protocol and thus shall be handled on a higher level + * (e.g. by the file transfer API). 
+ */ + pub peer_version: serde_json::Value, +} + +impl WormholeProtocolDefault { + pub fn new( + server: RendezvousServer, + config: AppConfig, + key: Key, + peer_version: serde_json::Value, + ) -> Self + where + T: serde::Serialize + Send + Sync + Sized + 'static, + { + let verifier = Box::new(derive_verifier(&key)); + Self { + server, + appid: config.id, + phase: 0, + key, + verifier, + our_version: Box::new(config.app_version), + peer_version, + } + } +} + +#[async_trait] +impl WormholeProtocol for WormholeProtocolDefault { + /** Send an encrypted message to peer */ + async fn send(&mut self, plaintext: Vec) -> Result<(), WormholeError> { + self.send_with_phase(plaintext, Phase::numeric).await + } + + async fn send_with_phase( + &mut self, + plaintext: Vec, + phase_provider: PhaseProvider, + ) -> Result<(), WormholeError> { + let current_phase = phase_provider(self.phase); + self.phase += 1; + let data_key = derive_phase_key(self.server.side(), &self.key, &current_phase); + let (_nonce, encrypted) = encrypt_data(&data_key, &plaintext); + self.server + .send_peer_message(current_phase, encrypted) + .await?; + Ok(()) + } + + /** Receive an encrypted message from peer */ + async fn receive(&mut self) -> Result, WormholeError> { + loop { + let peer_message = match self.server.next_peer_message().await? { + Some(peer_message) => peer_message, + None => continue, + }; + + // TODO maybe reorder incoming messages by phase numeral? + let decrypted_message = peer_message + .decrypt(&self.key) + .ok_or(WormholeError::Crypto)?; + + // Send to client + return Ok(decrypted_message); + } + } + + async fn close(&mut self) -> Result<(), WormholeError> { + log::debug!("Closing Wormhole…"); + self.server.shutdown(Mood::Happy).await.map_err(Into::into) + } + + /** + * The `AppID` this wormhole is bound to. + * This determines the upper-layer protocol. Only wormholes with the same value can talk to each other. + */ + fn appid(&self) -> &AppID { + &self.appid + } + + /** + * The symmetric encryption key used by this connection. + * Can be used to derive sub-keys for different purposes. + */ + fn key(&self) -> &Key { + &self.key + } + + fn peer_version(&self) -> &serde_json::Value { + &self.peer_version + } + + fn our_version(&self) -> &Box { + &self.our_version + } +} + +#[cfg_attr(test, automock)] +#[async_trait] +pub trait WormholeProtocol: Debug + Send + Sync { + async fn send(&mut self, plaintext: Vec) -> Result<(), WormholeError>; + async fn send_with_phase( + &mut self, + plaintext: Vec, + phase_provider: PhaseProvider, + ) -> Result<(), WormholeError>; + async fn receive(&mut self) -> Result, WormholeError>; + async fn close(&mut self) -> Result<(), WormholeError>; + fn appid(&self) -> &AppID; + fn key(&self) -> &Key; + fn peer_version(&self) -> &serde_json::Value; + fn our_version(&self) -> &Box; +} diff --git a/src/core/rendezvous.rs b/src/core/rendezvous.rs index ec999809..a1acc183 100644 --- a/src/core/rendezvous.rs +++ b/src/core/rendezvous.rs @@ -2,6 +2,7 @@ //! //! Wormhole builds upon this, so you usually don't need to bother. 
+#[cfg(not(target_family = "wasm"))] use async_tungstenite::tungstenite as ws2; use futures::prelude::*; use std::collections::VecDeque; @@ -38,11 +39,19 @@ pub enum RendezvousError { _0 )] Login(Vec), + #[cfg(not(target_family = "wasm"))] #[error("Websocket IO error")] IO( #[from] #[source] - async_tungstenite::tungstenite::Error, + ws2::Error, + ), + #[cfg(target_family = "wasm")] + #[error("Websocket IO error")] + IO( + #[from] + #[source] + ws_stream_wasm::WsErr, ), } @@ -65,11 +74,23 @@ impl RendezvousError { type MessageQueue = VecDeque; +#[derive(Clone, Debug, derive_more::Display)] +#[display(fmt = "{:?}", _0)] +struct NameplateList(Vec); + +#[cfg(not(target_family = "wasm"))] struct WsConnection { connection: async_tungstenite::WebSocketStream, } +#[cfg(target_family = "wasm")] +struct WsConnection { + connection: ws_stream_wasm::WsStream, + meta: ws_stream_wasm::WsMeta, +} + impl WsConnection { + #[cfg(not(target_family = "wasm"))] async fn send_message( &mut self, message: &OutboundMessage, @@ -83,6 +104,22 @@ impl WsConnection { Ok(()) } + #[cfg(target_family = "wasm")] + async fn send_message( + &mut self, + message: &OutboundMessage, + queue: Option<&mut MessageQueue>, + ) -> Result<(), RendezvousError> { + log::debug!("Sending {:?}", message); + self.connection + .send(ws_stream_wasm::WsMessage::Text( + serde_json::to_string(message).unwrap(), + )) + .await?; + self.receive_ack(queue).await?; + Ok(()) + } + async fn receive_ack( &mut self, mut queue: Option<&mut MessageQueue>, @@ -141,6 +178,9 @@ impl WsConnection { Some(InboundMessage::Error { error, orig: _ }) => { break Err(RendezvousError::Server(error.into())); }, + Some(InboundMessage::Nameplates { nameplates }) => { + break Ok(RendezvousReply::Nameplates(NameplateList(nameplates))) + }, Some(other) => { break Err(RendezvousError::protocol(format!( "Got unexpected message type from server '{}'", @@ -160,6 +200,7 @@ impl WsConnection { } } + #[cfg(not(target_family = "wasm"))] async fn receive_message(&mut self) -> Result, RendezvousError> { let message = self .connection @@ -195,6 +236,42 @@ impl WsConnection { }, } } + + #[cfg(target_family = "wasm")] + async fn receive_message(&mut self) -> Result, RendezvousError> { + let message = self + .connection + .next() + .await + .expect("TODO this should always be Some"); + match message { + ws_stream_wasm::WsMessage::Text(message_plain) => { + let message = serde_json::from_str(&message_plain)?; + log::debug!("Received {:?}", message); + match message { + InboundMessage::Unknown => { + log::warn!("Got unknown message, ignoring: '{}'", message_plain); + Ok(None) + }, + InboundMessage::Error { error, orig: _ } => Err(RendezvousError::server(error)), + message => Ok(Some(message)), + } + }, + ws_stream_wasm::WsMessage::Binary(_) => Err(RendezvousError::protocol( + "WebSocket messages must be UTF-8 encoded text", + )), + } + } + + #[cfg(not(target_family = "wasm"))] + async fn close(&mut self) -> Result<(), ws2::Error> { + self.connection.close(None).await + } + + #[cfg(target_family = "wasm")] + async fn close(&mut self) -> Result { + self.meta.close().await + } } #[derive(Clone, Debug, derive_more::Display)] @@ -203,6 +280,7 @@ enum RendezvousReply { Released, Claimed(Mailbox), Closed, + Nameplates(NameplateList), } #[derive(Clone, Debug, derive_more::Display)] @@ -261,9 +339,23 @@ impl RendezvousServer { appid: &AppID, relay_url: &str, ) -> Result<(Self, Option), RendezvousError> { - let side = MySide::generate(); - let (connection, _) = 
async_tungstenite::async_std::connect_async(relay_url).await?; - let mut connection = WsConnection { connection }; + let side = MySide::generate(5); + let mut connection; + + #[cfg(not(target_arch = "wasm32"))] + { + let (stream, _) = async_tungstenite::async_std::connect_async(relay_url).await?; + connection = WsConnection { connection: stream }; + } + + #[cfg(target_arch = "wasm32")] + { + let (meta, stream) = ws_stream_wasm::WsMeta::connect(relay_url, None).await?; + connection = WsConnection { + meta, + connection: stream, + }; + } let welcome = match connection.receive_message_some().await? { InboundMessage::Welcome { welcome } => welcome, @@ -444,6 +536,19 @@ impl RendezvousServer { .is_some() } + /** + * Gets the list of currently claimed nameplates. + * This can be called at any time. + */ + pub async fn list_nameplates(&mut self) -> Result, RendezvousError> { + self.send_message(&OutboundMessage::List).await?; + let nameplate_reply = self.receive_reply().await?; + match nameplate_reply { + RendezvousReply::Nameplates(x) => Ok(x.0), + other => Err(RendezvousError::invalid_message("nameplates", other)), + } + } + pub async fn release_nameplate(&mut self) -> Result<(), RendezvousError> { let nameplate = &mut self .state @@ -483,34 +588,37 @@ impl RendezvousServer { Ok(()) } - pub async fn shutdown(mut self, mood: Mood) -> Result<(), RendezvousError> { + pub async fn shutdown(&mut self, mood: Mood) -> Result<(), RendezvousError> { if let Some(MailboxMachine { - nameplate, - mailbox, - mut queue, + ref nameplate, + ref mailbox, + ref mut queue, .. }) = self.state { if let Some(nameplate) = nameplate { self.connection - .send_message(&OutboundMessage::release(nameplate), Some(&mut queue)) + .send_message(&OutboundMessage::release(nameplate.to_owned()), Some(queue)) .await?; - match self.connection.receive_reply(Some(&mut queue)).await? { + match self.connection.receive_reply(Some(queue)).await? { RendezvousReply::Released => (), other => return Err(RendezvousError::invalid_message("released", other)), }; } self.connection - .send_message(&OutboundMessage::close(mailbox, mood), Some(&mut queue)) + .send_message( + &OutboundMessage::close(mailbox.to_owned(), mood), + Some(queue), + ) .await?; - match self.connection.receive_reply(Some(&mut queue)).await? { + match self.connection.receive_reply(Some(queue)).await? { RendezvousReply::Closed => (), other => return Err(RendezvousError::invalid_message("closed", other)), }; } - self.connection.connection.close(None).await?; + self.connection.close().await?; Ok(()) } } diff --git a/src/core/server_messages.rs b/src/core/server_messages.rs index d0fa5264..487bf4c7 100644 --- a/src/core/server_messages.rs +++ b/src/core/server_messages.rs @@ -244,7 +244,8 @@ pub enum InboundMessage { #[display(fmt = "Error {{ error: {:?}, .. }}", error)] Error { error: String, - orig: Box, + /// A copy of the original message that caused the error. 
+ orig: Box, }, #[serde(other)] Unknown, @@ -253,7 +254,6 @@ pub enum InboundMessage { #[cfg(test)] mod test { use super::*; - use serde_json::{from_str, json, Value}; #[test] fn test_bind() { @@ -262,10 +262,10 @@ mod test { MySide::unchecked_from_string(String::from("side1")), ); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); assert_eq!( m2, - json!({"type": "bind", "appid": "appid", + serde_json::json!({"type": "bind", "appid": "appid", "side": "side1"}) ); } @@ -274,50 +274,59 @@ mod test { fn test_list() { let m1 = OutboundMessage::List; let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); - assert_eq!(m2, json!({"type": "list"})); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); + assert_eq!(m2, serde_json::json!({"type": "list"})); } #[test] fn test_allocate() { let m1 = OutboundMessage::Allocate; let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); - assert_eq!(m2, json!({"type": "allocate"})); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); + assert_eq!(m2, serde_json::json!({"type": "allocate"})); } #[test] fn test_claim() { let m1 = OutboundMessage::claim("nameplate1"); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); - assert_eq!(m2, json!({"type": "claim", "nameplate": "nameplate1"})); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); + assert_eq!( + m2, + serde_json::json!({"type": "claim", "nameplate": "nameplate1"}) + ); } #[test] fn test_release() { let m1 = OutboundMessage::release("nameplate1"); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); - assert_eq!(m2, json!({"type": "release", "nameplate": "nameplate1"})); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); + assert_eq!( + m2, + serde_json::json!({"type": "release", "nameplate": "nameplate1"}) + ); } #[test] fn test_open() { let m1 = OutboundMessage::open(Mailbox(String::from("mailbox1"))); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); - assert_eq!(m2, json!({"type": "open", "mailbox": "mailbox1"})); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); + assert_eq!( + m2, + serde_json::json!({"type": "open", "mailbox": "mailbox1"}) + ); } #[test] fn test_add() { let m1 = OutboundMessage::add(Phase("phase1".into()), b"body".to_vec()); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); assert_eq!( m2, - json!({"type": "add", "phase": "phase1", + serde_json::json!({"type": "add", "phase": "phase1", "body": "626f6479"}) ); // body is hex-encoded } @@ -326,10 +335,10 @@ mod test { fn test_close() { let m1 = OutboundMessage::close(Mailbox(String::from("mailbox1")), Mood::Happy); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); assert_eq!( m2, - json!({"type": "close", "mailbox": "mailbox1", + serde_json::json!({"type": "close", "mailbox": "mailbox1", "mood": "happy"}) ); } @@ -338,10 +347,10 @@ mod test { fn test_close_errory() { let m1 = OutboundMessage::close(Mailbox(String::from("mailbox1")), Mood::Errory); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); assert_eq!( 
m2, - json!({"type": "close", "mailbox": "mailbox1", + serde_json::json!({"type": "close", "mailbox": "mailbox1", "mood": "errory"}) ); } @@ -350,10 +359,10 @@ mod test { fn test_close_scared() { let m1 = OutboundMessage::close(Mailbox(String::from("mailbox1")), Mood::Scared); let s = serde_json::to_string(&m1).unwrap(); - let m2: Value = from_str(&s).unwrap(); + let m2: serde_json::Value = serde_json::from_str(&s).unwrap(); assert_eq!( m2, - json!({"type": "close", "mailbox": "mailbox1", + serde_json::json!({"type": "close", "mailbox": "mailbox1", "mood": "scary"}) ); } @@ -422,9 +431,12 @@ mod test { bits: 6, resource: "resource-string".into(), }), - other: [("dark-ritual".to_string(), json!({ "hocrux": true }))] - .into_iter() - .collect() + other: [( + "dark-ritual".to_string(), + serde_json::json!({ "hocrux": true }) + )] + .into_iter() + .collect() }), current_cli_version: None, error: None, diff --git a/src/core/test.rs b/src/core/test.rs index b060a3dc..6123550b 100644 --- a/src/core/test.rs +++ b/src/core/test.rs @@ -1,23 +1,29 @@ use super::{Mood, Phase}; +use rand::Rng; use std::{borrow::Cow, time::Duration}; -use crate::{self as magic_wormhole, AppConfig, AppID, Code, Wormhole}; +use crate::{ + self as magic_wormhole, + core::{MailboxConnection, Nameplate}, + AppConfig, AppID, Code, Wormhole, WormholeError, +}; #[cfg(feature = "transfer")] use crate::{transfer, transit}; pub const TEST_APPID: AppID = AppID(std::borrow::Cow::Borrowed( - "lothar.com/wormhole/rusty-wormhole-test", + "piegames.de/wormhole/rusty-wormhole-test", )); pub const APP_CONFIG: AppConfig<()> = AppConfig::<()> { id: TEST_APPID, rendezvous_url: Cow::Borrowed(crate::rendezvous::DEFAULT_RENDEZVOUS_SERVER), app_version: (), + with_dilation: false, }; const TIMEOUT: Duration = Duration::from_secs(60); -fn init_logger() { +pub fn init_logger() { /* Ignore errors from succeedent initialization tries */ let _ = env_logger::builder() .filter_level(log::LevelFilter::Debug) @@ -35,289 +41,355 @@ fn default_relay_hints() -> Vec { ] } -/** Send a file using the Rust implementation. This does not guarantee compatibility with Python! 
;) */ -#[cfg(feature = "transfer")] #[async_std::test] -pub async fn test_file_rust2rust() -> eyre::Result<()> { +pub async fn test_connect_with_unknown_code_and_allocate_passes() -> eyre::Result<(), WormholeError> +{ init_logger(); - let (code_tx, code_rx) = futures::channel::oneshot::channel(); - - let sender_task = async_std::task::Builder::new() - .name("sender".to_owned()) - .spawn(async { - let (welcome, connector) = - Wormhole::connect_without_code(transfer::APP_CONFIG.id(TEST_APPID), 2).await?; - if let Some(welcome) = &welcome.welcome { - log::info!("Got welcome: {}", welcome); - } - log::info!("This wormhole's code is: {}", &welcome.code); - code_tx.send(welcome.code).unwrap(); - let wormhole = connector.await?; - eyre::Result::<_>::Ok( - transfer::send_file( - wormhole, - default_relay_hints(), - &mut async_std::fs::File::open("tests/example-file.bin").await?, - "example-file.bin", - std::fs::metadata("tests/example-file.bin").unwrap().len(), - magic_wormhole::transit::Abilities::ALL_ABILITIES, - &transit::log_transit_connection, - |_sent, _total| {}, - futures::future::pending(), - ) - .await?, - ) - })?; - let receiver_task = async_std::task::Builder::new() - .name("receiver".to_owned()) - .spawn(async { - let code = code_rx.await?; - log::info!("Got code over local: {}", &code); - let (welcome, wormhole) = - Wormhole::connect_with_code(transfer::APP_CONFIG.id(TEST_APPID), code).await?; - if let Some(welcome) = &welcome.welcome { - log::info!("Got welcome: {}", welcome); - } + let code = generate_random_code(); - let req = transfer::request_file( - wormhole, - default_relay_hints(), - magic_wormhole::transit::Abilities::ALL_ABILITIES, - futures::future::pending(), - ) - .await? - .unwrap(); - - let mut buffer = Vec::::new(); - req.accept( - &transit::log_transit_connection, - |_received, _total| {}, - &mut buffer, - futures::future::pending(), - ) - .await?; - Ok(buffer) - })?; + let mailbox_connection = + MailboxConnection::connect(transfer::APP_CONFIG.id(TEST_APPID).clone(), code, true).await; - sender_task.await?; - let original = std::fs::read("tests/example-file.bin")?; - let received: Vec = (receiver_task.await as eyre::Result>)?; + assert!(mailbox_connection.is_ok()); - assert_eq!(original, received, "Files differ"); - Ok(()) + mailbox_connection.unwrap().shutdown(Mood::Happy).await } -/** Send a file using the Rust implementation that has exactly 4096 bytes (our chunk size) */ -#[cfg(feature = "transfer")] #[async_std::test] -pub async fn test_4096_file_rust2rust() -> eyre::Result<()> { +pub async fn test_connect_with_unknown_code_and_no_allocate_fails() { init_logger(); - let (code_tx, code_rx) = futures::channel::oneshot::channel(); + let code = generate_random_code(); - const FILENAME: &str = "tests/example-file-4096.bin"; + let mailbox_connection = MailboxConnection::connect( + transfer::APP_CONFIG.id(TEST_APPID).clone(), + code.clone(), + false, + ) + .await; - let sender_task = async_std::task::Builder::new() - .name("sender".to_owned()) - .spawn(async { - let (welcome, connector) = - Wormhole::connect_without_code(transfer::APP_CONFIG.id(TEST_APPID), 2).await?; - if let Some(welcome) = &welcome.welcome { - log::info!("Got welcome: {}", welcome); - } - log::info!("This wormhole's code is: {}", &welcome.code); - code_tx.send(welcome.code).unwrap(); - let wormhole = connector.await?; - eyre::Result::<_>::Ok( - transfer::send_file( + assert!(mailbox_connection.is_err()); + let error = mailbox_connection.err().unwrap(); + match error { + 
WormholeError::UnclaimedNameplate(nameplate) => { + assert_eq!(nameplate, code.nameplate()); + }, + _ => { + assert!(false); + }, + } +} + +/** Generate common offers for testing, together with a pre-made answer that checks the received content */ +async fn file_offers() -> eyre::Result> { + async fn offer(name: &str) -> eyre::Result<(transfer::OfferSend, transfer::OfferAccept)> { + let path = format!("tests/{name}"); + let offer = transfer::OfferSend::new_file_or_folder(name.into(), &path).await?; + let answer = transfer::OfferSend::new_file_or_folder(name.into(), &path) + .await? + .set_content(|_path| { + use std::{ + io, + pin::Pin, + task::{Context, Poll}, + }; + + let path = path.clone(); + let content = transfer::new_accept_content(move |_append| { + struct Writer { + closed: bool, + send_bytes: Vec, + receive_bytes: Vec, + } + + impl futures::io::AsyncWrite for Writer { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + self.receive_bytes.extend_from_slice(buf); + Poll::Ready(Ok(buf.len())) + } + + fn poll_close( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + self.closed = true; + if self.send_bytes == self.receive_bytes { + Poll::Ready(Ok(())) + } else { + Poll::Ready(Err(io::Error::new( + io::ErrorKind::Other, + "Send and receive are not the same", + ))) + } + } + + fn poll_flush( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + } + + impl Drop for Writer { + fn drop(&mut self) { + assert!(self.closed, "Implementation forgot to close Writer"); + } + } + + let path = path.clone(); + async move { + Ok(Writer { + closed: false, + send_bytes: async_std::fs::read(&path).await?, + receive_bytes: Vec::new(), + }) + } + }); + transfer::AcceptInner { + content, + offset: 0, + sha256: None, + } + }); + + Ok((offer, answer)) + } + + Ok(vec![ + offer("example-file.bin").await?, + /* Empty file: https://github.com/magic-wormhole/magic-wormhole.rs/issues/160 */ + offer("example-file-empty").await?, + /* 4k file: https://github.com/magic-wormhole/magic-wormhole.rs/issues/152 */ + offer("example-file-4096.bin").await?, + ]) +} + +/** Send a file using the Rust implementation (using deprecated API). This does not guarantee compatibility with Python! ;) */ +#[cfg(feature = "transfer")] +#[async_std::test] +#[allow(deprecated)] +pub async fn test_file_rust2rust_deprecated() -> eyre::Result<()> { + init_logger(); + + for (offer, answer) in file_offers().await? 
{ + let (code_tx, code_rx) = futures::channel::oneshot::channel(); + + let sender_task = async_std::task::Builder::new() + .name("sender".to_owned()) + .spawn(async { + let (welcome, wormhole_future) = + Wormhole::connect_without_code(transfer::APP_CONFIG.id(TEST_APPID).clone(), 2) + .await?; + if let Some(welcome) = &welcome.welcome { + log::info!("Got welcome: {}", welcome); + } + log::info!("This wormhole's code is: {}", &welcome.code); + code_tx.send(welcome.code.clone()).unwrap(); + let wormhole = wormhole_future.await?; + eyre::Result::<_>::Ok( + transfer::send( + wormhole, + default_relay_hints(), + magic_wormhole::transit::Abilities::ALL_ABILITIES, + offer, + &transit::log_transit_connection, + |_sent, _total| {}, + futures::future::pending(), + ) + .await?, + ) + })?; + let receiver_task = async_std::task::Builder::new() + .name("receiver".to_owned()) + .spawn(async { + let code = code_rx.await?; + let config = transfer::APP_CONFIG.id(TEST_APPID); + log::info!("Got code over local: {}", &code); + let (welcome, wormhole) = + Wormhole::connect_with_code(config.clone(), code, true).await?; + if let Some(welcome) = &welcome.welcome { + log::info!("Got welcome: {}", welcome); + } + + // Hacky v1-compat conversion for now + let mut answer = + (answer.into_iter_files().next().unwrap().1.content)(false).await?; + + let transfer::ReceiveRequest::V1(req) = transfer::request( wormhole, default_relay_hints(), - &mut async_std::fs::File::open(FILENAME).await?, - "example-file.bin", - std::fs::metadata(FILENAME).unwrap().len(), magic_wormhole::transit::Abilities::ALL_ABILITIES, + futures::future::pending(), + ) + .await? + .unwrap() + else {panic!("v2 should be disabled for now")}; + req.accept( &transit::log_transit_connection, - |_sent, _total| {}, + &mut answer, + |_received, _total| {}, futures::future::pending(), ) - .await?, - ) - })?; - let receiver_task = async_std::task::Builder::new() - .name("receiver".to_owned()) - .spawn(async { - let code = code_rx.await?; - log::info!("Got code over local: {}", &code); - let (welcome, wormhole) = - Wormhole::connect_with_code(transfer::APP_CONFIG.id(TEST_APPID), code).await?; - if let Some(welcome) = &welcome.welcome { - log::info!("Got welcome: {}", welcome); - } + .await?; + eyre::Result::<_>::Ok(()) + })?; - let req = transfer::request_file( - wormhole, - default_relay_hints(), - magic_wormhole::transit::Abilities::ALL_ABILITIES, - futures::future::pending(), - ) - .await? - .unwrap(); - - let mut buffer = Vec::::new(); - req.accept( - &transit::log_transit_connection, - |_received, _total| {}, - &mut buffer, - futures::future::pending(), - ) - .await?; - Ok(buffer) - })?; - - sender_task.await?; - let original = std::fs::read(FILENAME)?; - let received: Vec = (receiver_task.await as eyre::Result>)?; - - assert_eq!(original, received, "Files differ"); + sender_task.await?; + receiver_task.await?; + } Ok(()) } -/** https://github.com/magic-wormhole/magic-wormhole.rs/issues/160 */ +/** Send a file using the Rust implementation. This does not guarantee compatibility with Python! 
;) */ #[cfg(feature = "transfer")] #[async_std::test] -pub async fn test_empty_file_rust2rust() -> eyre::Result<()> { +pub async fn test_file_rust2rust() -> eyre::Result<()> { init_logger(); - let (code_tx, code_rx) = futures::channel::oneshot::channel(); - - let sender_task = async_std::task::Builder::new() - .name("sender".to_owned()) - .spawn(async { - let (welcome, connector) = - Wormhole::connect_without_code(transfer::APP_CONFIG.id(TEST_APPID), 2).await?; - if let Some(welcome) = &welcome.welcome { - log::info!("Got welcome: {}", welcome); - } - log::info!("This wormhole's code is: {}", &welcome.code); - code_tx.send(welcome.code).unwrap(); - let wormhole = connector.await?; - eyre::Result::<_>::Ok( - transfer::send_file( + for (offer, answer) in file_offers().await? { + let (code_tx, code_rx) = futures::channel::oneshot::channel(); + + let sender_task = async_std::task::Builder::new() + .name("sender".to_owned()) + .spawn(async { + let mailbox_connection = + MailboxConnection::create(transfer::APP_CONFIG.id(TEST_APPID).clone(), 2) + .await?; + if let Some(welcome) = &mailbox_connection.welcome { + log::info!("Got welcome: {}", welcome); + } + log::info!("This wormhole's code is: {}", &mailbox_connection.code); + code_tx.send(mailbox_connection.code.clone()).unwrap(); + let wormhole = Wormhole::connect(mailbox_connection).await?; + eyre::Result::<_>::Ok( + transfer::send( + wormhole, + default_relay_hints(), + magic_wormhole::transit::Abilities::ALL_ABILITIES, + offer, + &transit::log_transit_connection, + |_sent, _total| {}, + futures::future::pending(), + ) + .await?, + ) + })?; + let receiver_task = async_std::task::Builder::new() + .name("receiver".to_owned()) + .spawn(async { + let code = code_rx.await?; + let config = transfer::APP_CONFIG.id(TEST_APPID); + let mailbox = MailboxConnection::connect(config, code.clone(), false).await?; + if let Some(welcome) = mailbox.welcome.clone() { + log::info!("Got welcome: {}", welcome); + } + let wormhole = Wormhole::connect(mailbox).await?; + + // Hacky v1-compat conversion for now + let mut answer = + (answer.into_iter_files().next().unwrap().1.content)(false).await?; + + let transfer::ReceiveRequest::V1(req) = transfer::request( wormhole, default_relay_hints(), - &mut async_std::fs::File::open("tests/example-file-empty").await?, - "example-file-empty", - std::fs::metadata("tests/example-file-empty").unwrap().len(), magic_wormhole::transit::Abilities::ALL_ABILITIES, + futures::future::pending(), + ) + .await? + .unwrap() + else {panic!("v2 should be disabled for now")}; + req.accept( &transit::log_transit_connection, - |_sent, _total| {}, + &mut answer, + |_received, _total| {}, futures::future::pending(), ) - .await?, - ) - })?; - let receiver_task = async_std::task::Builder::new() - .name("receiver".to_owned()) - .spawn(async { - let code = code_rx.await?; - log::info!("Got code over local: {}", &code); - let (welcome, wormhole) = - Wormhole::connect_with_code(transfer::APP_CONFIG.id(TEST_APPID), code).await?; - if let Some(welcome) = &welcome.welcome { - log::info!("Got welcome: {}", welcome); - } - - let req = transfer::request_file( - wormhole, - default_relay_hints(), - magic_wormhole::transit::Abilities::ALL_ABILITIES, - futures::future::pending(), - ) - .await? 
- .unwrap(); - - let mut buffer = Vec::::new(); - req.accept( - &transit::log_transit_connection, - |_received, _total| {}, - &mut buffer, - futures::future::pending(), - ) - .await?; - eyre::Result::>::Ok(buffer) - })?; - - sender_task.await?; + .await?; + eyre::Result::<_>::Ok(()) + })?; - assert!(&receiver_task.await?.is_empty()); + sender_task.await?; + receiver_task.await?; + } Ok(()) } -/** Test the functionality used by the `send-many` subcommand. It logically builds upon the - * `test_eventloop_exit` tests. We send us a file five times, and check if it arrived. +/** Test the functionality used by the `send-many` subcommand. */ #[cfg(feature = "transfer")] #[async_std::test] pub async fn test_send_many() -> eyre::Result<()> { init_logger(); - let (welcome, connector) = - Wormhole::connect_without_code(transfer::APP_CONFIG.id(TEST_APPID), 2).await?; - - let code = welcome.code; + let mailbox = MailboxConnection::create(transfer::APP_CONFIG.id(TEST_APPID), 2).await?; + let code = mailbox.code.clone(); log::info!("The code is {:?}", code); - let correct_data = std::fs::read("tests/example-file.bin")?; + async fn gen_offer() -> eyre::Result { + file_offers().await.map(|mut vec| vec.remove(0).0) + } + + async fn gen_accept() -> eyre::Result { + file_offers().await.map(|mut vec| vec.remove(0).1) + } /* Send many */ let sender_code = code.clone(); let senders = async_std::task::spawn(async move { // let mut senders = Vec::, eyre::Error>>>::new(); - let mut senders = Vec::new(); + let mut senders: Vec>> = Vec::new(); /* The first time, we reuse the current session for sending */ { log::info!("Sending file #{}", 0); - let wormhole = connector.await?; + let wormhole = Wormhole::connect(mailbox).await?; senders.push(async_std::task::spawn(async move { - default_relay_hints(); - crate::transfer::send_file( - wormhole, - default_relay_hints(), - &mut async_std::fs::File::open("tests/example-file.bin").await?, - "example-file.bin", - std::fs::metadata("tests/example-file.bin").unwrap().len(), - magic_wormhole::transit::Abilities::ALL_ABILITIES, - &transit::log_transit_connection, - |_, _| {}, - futures::future::pending(), + eyre::Result::Ok( + crate::transfer::send( + wormhole, + default_relay_hints(), + magic_wormhole::transit::Abilities::ALL_ABILITIES, + gen_offer().await?, + &transit::log_transit_connection, + |_, _| {}, + futures::future::pending(), + ) + .await?, ) - .await })); } for i in 1..5usize { log::info!("Sending file #{}", i); - let (_welcome, wormhole) = Wormhole::connect_with_code( - transfer::APP_CONFIG.id(TEST_APPID), - sender_code.clone(), + let wormhole = Wormhole::connect( + MailboxConnection::connect( + transfer::APP_CONFIG.id(TEST_APPID), + sender_code.clone(), + true, + ) + .await?, ) .await?; + let gen_offer = gen_offer.clone(); senders.push(async_std::task::spawn(async move { - default_relay_hints(); - crate::transfer::send_file( - wormhole, - default_relay_hints(), - &mut async_std::fs::File::open("tests/example-file.bin").await?, - "example-file.bin", - std::fs::metadata("tests/example-file.bin").unwrap().len(), - magic_wormhole::transit::Abilities::ALL_ABILITIES, - &transit::log_transit_connection, - |_, _| {}, - futures::future::pending(), + eyre::Result::Ok( + crate::transfer::send( + wormhole, + default_relay_hints(), + magic_wormhole::transit::Abilities::ALL_ABILITIES, + gen_offer().await?, + &transit::log_transit_connection, + |_, _| {}, + futures::future::pending(), + ) + .await?, ) - .await })); } eyre::Result::<_>::Ok(senders) @@ -328,27 +400,39 @@ pub async 
fn test_send_many() -> eyre::Result<()> { /* Receive many */ for i in 0..5usize { log::info!("Receiving file #{}", i); - let (_welcome, wormhole) = - Wormhole::connect_with_code(transfer::APP_CONFIG.id(TEST_APPID), code.clone()).await?; - log::info!("Got key: {}", &wormhole.key); - let req = crate::transfer::request_file( + let wormhole = Wormhole::connect( + MailboxConnection::connect(transfer::APP_CONFIG.id(TEST_APPID), code.clone(), true) + .await?, + ) + .await?; + log::info!("Got key: {}", &wormhole.key()); + let transfer::ReceiveRequest::V1(req) = crate::transfer::request( wormhole, default_relay_hints(), magic_wormhole::transit::Abilities::ALL_ABILITIES, futures::future::pending(), ) .await? - .unwrap(); + .unwrap() + else {panic!("v2 should be disabled for now")}; + + // Hacky v1-compat conversion for now + let mut answer = (gen_accept() + .await? + .into_iter_files() + .next() + .unwrap() + .1 + .content)(false) + .await?; - let mut buffer = Vec::::new(); req.accept( &transit::log_transit_connection, + &mut answer, |_, _| {}, - &mut buffer, futures::future::pending(), ) .await?; - assert_eq!(correct_data, buffer, "Files #{} differ", i); } for sender in senders.await? { @@ -368,14 +452,15 @@ pub async fn test_wrong_code() -> eyre::Result<()> { let sender_task = async_std::task::Builder::new() .name("sender".to_owned()) .spawn(async { - let (welcome, connector) = Wormhole::connect_without_code(APP_CONFIG, 2).await?; - if let Some(welcome) = &welcome.welcome { + let mailbox = MailboxConnection::create(APP_CONFIG, 2).await?; + if let Some(welcome) = &mailbox.welcome { log::info!("Got welcome: {}", welcome); } - log::info!("This wormhole's code is: {}", &welcome.code); - code_tx.send(welcome.code.nameplate()).unwrap(); + let code = mailbox.code.clone(); + log::info!("This wormhole's code is: {}", &code); + code_tx.send(code.nameplate()).unwrap(); - let result = connector.await; + let result = Wormhole::connect(mailbox).await; /* This should have failed, due to the wrong code */ assert!(result.is_err()); eyre::Result::<_>::Ok(()) @@ -385,10 +470,14 @@ pub async fn test_wrong_code() -> eyre::Result<()> { .spawn(async { let nameplate = code_rx.await?; log::info!("Got nameplate over local: {}", &nameplate); - let result = Wormhole::connect_with_code( - APP_CONFIG, - /* Making a wrong code here by appending bullshit */ - Code::new(&nameplate, "foo-bar"), + let result = Wormhole::connect( + MailboxConnection::connect( + APP_CONFIG, + /* Making a wrong code here by appending bullshit */ + Code::new(&nameplate, "foo-bar"), + true, + ) + .await?, ) .await; @@ -408,14 +497,17 @@ pub async fn test_wrong_code() -> eyre::Result<()> { pub async fn test_crowded() -> eyre::Result<()> { init_logger(); - let (welcome, connector1) = Wormhole::connect_without_code(APP_CONFIG, 2).await?; - log::info!("This test's code is: {}", &welcome.code); + let initial_mailbox_connection = MailboxConnection::create(APP_CONFIG, 2).await?; + log::info!("This test's code is: {}", &initial_mailbox_connection.code); + let code = initial_mailbox_connection.code.clone(); - let connector2 = Wormhole::connect_with_code(APP_CONFIG, welcome.code.clone()); + let mailbox_connection_1 = MailboxConnection::connect(APP_CONFIG.clone(), code.clone(), false); + let mailbox_connection_2 = MailboxConnection::connect(APP_CONFIG.clone(), code.clone(), false); - let connector3 = Wormhole::connect_with_code(APP_CONFIG, welcome.code.clone()); - - match futures::try_join!(connector1, connector2, connector3).unwrap_err() { + match 
futures::try_join!(mailbox_connection_1, mailbox_connection_2) + .err() + .unwrap() + { magic_wormhole::WormholeError::ServerError( magic_wormhole::rendezvous::RendezvousError::Server(error), ) => { @@ -427,6 +519,31 @@ pub async fn test_crowded() -> eyre::Result<()> { Ok(()) } +#[async_std::test] +pub async fn test_connect_with_code_expecting_nameplate() -> eyre::Result<()> { + let code = generate_random_code(); + let result = MailboxConnection::connect(APP_CONFIG, code.clone(), false).await; + let error = result.err().unwrap(); + match error { + magic_wormhole::WormholeError::UnclaimedNameplate(x) => { + assert_eq!(x, code.nameplate()); + }, + other => panic!( + "Got wrong error type {:?}. Expected `NameplateNotFound`", + other + ), + } + + Ok(()) +} + +fn generate_random_code() -> Code { + let mut rng = rand::thread_rng(); + let nameplate_string = format!("{}-guitarist-revenge", rng.gen_range(1000..10000)); + let nameplate = Nameplate::new(&nameplate_string); + Code::new(&nameplate, "guitarist-revenge") +} + #[test] fn test_phase() { let p = Phase::PAKE; diff --git a/src/core/wordlist.rs b/src/core/wordlist.rs index 485de8d5..2a7531ab 100644 --- a/src/core/wordlist.rs +++ b/src/core/wordlist.rs @@ -1,6 +1,6 @@ use rand::{rngs::OsRng, seq::SliceRandom}; use serde_json::{self, Value}; - +use std::fmt; use dialoguer::Completion; use std::collections::HashMap; @@ -89,7 +89,8 @@ impl Completion for PgpWordList { } fn load_pgpwords() -> Vec> { - let raw_words_value: Value = serde_json::from_str(include_str!("pgpwords.json")).unwrap(); + let raw_words_value: serde_json::Value = + serde_json::from_str(include_str!("pgpwords.json")).unwrap(); let raw_words = raw_words_value.as_object().unwrap(); let mut even_words: Vec = Vec::with_capacity(256); even_words.resize(256, String::from("")); diff --git a/src/dilated_transfer/mod.rs b/src/dilated_transfer/mod.rs new file mode 100644 index 00000000..7bb54d66 --- /dev/null +++ b/src/dilated_transfer/mod.rs @@ -0,0 +1,77 @@ +use crate::{core::APPID_RAW, AppID}; +use std::borrow::Cow; + +pub const APP_CONFIG: crate::AppConfig = crate::AppConfig:: { + id: AppID(Cow::Borrowed(APPID_RAW)), + rendezvous_url: Cow::Borrowed(crate::rendezvous::DEFAULT_RENDEZVOUS_SERVER), + app_version: AppVersion::new(Some(FileTransferV2Mode::Send)), + with_dilation: false, +}; + +#[derive(Clone, serde_derive::Serialize, serde_derive::Deserialize)] +#[serde(rename_all = "kebab-case")] +#[serde(rename = "transfer")] +pub enum FileTransferV2Mode { + Send, + Receive, + Connect, +} + +#[derive(Clone, serde_derive::Serialize, serde_derive::Deserialize)] +#[serde(rename_all = "kebab-case")] +struct DilatedTransfer { + mode: FileTransferV2Mode, +} + +#[derive(Clone, serde_derive::Serialize, serde_derive::Deserialize)] +#[serde(rename_all = "kebab-case")] +pub struct AppVersion { + // #[serde(default)] + // abilities: Cow<'static, [Cow<'static, str>]>, + // #[serde(default)] + // transfer_v2: Option, + + // XXX: we don't want to send "can-dilate" key for non-dilated + // wormhole, would making this an Option help? i.e. when the value + // is a None, we don't serialize that into the json and do it only + // when it is a "Some" value? 
+ // overall versions payload is of the form: + // b'{"can-dilate": ["1"], "dilation-abilities": [{"type": "direct-tcp-v1"}, {"type": "relay-v1"}], "app_versions": {"transfer": {"mode": "send", "features": {}}}}' + + //can_dilate: Option<[Cow<'static, str>; 1]>, + //dilation_abilities: Cow<'static, [Ability; 2]>, + #[serde(rename = "transfer")] + app_versions: Option, +} + +impl AppVersion { + pub const fn new(mode: Option) -> Self { + // let can_dilate: Option<[Cow<'static, str>; 1]> = if enable_dilation { + // Some([std::borrow::Cow::Borrowed("1")]) + // } else { + // None + // }; + + let option = match mode { + Some(mode) => Some(DilatedTransfer { mode }), + None => None, + }; + + Self { + // abilities: Cow::Borrowed([Cow::Borrowed("transfer-v1"), Cow::Borrowed("transfer-v2")]), + // transfer_v2: Some(AppVersionTransferV2Hint::new()) + // can_dilate: can_dilate, + // dilation_abilities: std::borrow::Cow::Borrowed(&[ + // Ability{ ty: std::borrow::Cow::Borrowed("direct-tcp-v1") }, + // Ability{ ty: std::borrow::Cow::Borrowed("relay-v1") }, + // ]), + app_versions: option, + } + } +} + +impl Default for AppVersion { + fn default() -> Self { + Self::new(Some(FileTransferV2Mode::Send)) + } +} diff --git a/src/dilation/api.rs b/src/dilation/api.rs new file mode 100644 index 00000000..d790d5f4 --- /dev/null +++ b/src/dilation/api.rs @@ -0,0 +1,33 @@ +use crate::core::MySide; +use derive_more::Display; +use serde_derive::{Deserialize, Serialize}; + +// from IO to DilationCore +#[derive(Debug, Clone, PartialEq, Display, Deserialize)] +pub enum IOEvent { + WormholeMessageReceived(String), + TCPConnectionLost, + TCPConnectionMade, +} + +/// Commands to be executed +#[derive(Debug, Clone, PartialEq, Display)] +pub enum ManagerCommand { + // XXX: include API calls to IO layer + Protocol(ProtocolCommand), + IO(IOCommand), +} + +/// Protocol level commands +#[derive(Debug, Clone, PartialEq, Display, Serialize)] +#[serde(tag = "type")] +pub enum ProtocolCommand { + #[serde(rename = "please")] + SendPlease { side: MySide }, +} + +/// Protocol level commands +#[derive(Debug, Clone, PartialEq, Display)] +pub enum IOCommand { + CloseConnection, +} diff --git a/src/dilation/events.rs b/src/dilation/events.rs new file mode 100644 index 00000000..a465b97c --- /dev/null +++ b/src/dilation/events.rs @@ -0,0 +1,89 @@ +use derive_more::Display; +use serde_derive::Deserialize; + +use crate::{ + core::TheirSide, + dilation::api::{IOEvent, ManagerCommand}, + transit::Hints, +}; + +use super::api::ProtocolCommand; + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub enum Event { + //IO(IOAction), + // All state machine events + Manager(ManagerEvent), + Connection(IOEvent), +} + +impl From for ManagerCommand { + fn from(r: ProtocolCommand) -> ManagerCommand { + ManagerCommand::Protocol(r) + } +} + +impl From for Event { + fn from(r: ManagerEvent) -> Event { + Event::Manager(r) + } +} + +// individual fsm events +#[derive(Display, Debug, Clone, PartialEq, Deserialize)] +#[serde(tag = "type")] +pub enum ManagerEvent { + #[serde(rename = "start")] + Start, + #[serde(rename = "please")] + RxPlease { + side: TheirSide, + }, + #[serde(rename = "connection-hints")] + RxHints { + hints: Hints, + }, + RxReconnect, + RxReconnecting, + ConnectionMade, + ConnectionLostLeader, + ConnectionLostFollower, + Stop, +} + +// XXX: for Connector fsm events +// ... 
+// XXX + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_display_please_event() { + let event = ManagerEvent::RxPlease { + side: TheirSide::from("f91dcdaccc7cc336"), + }; + assert_eq!(format!("{}", event), "TheirSide(f91dcdaccc7cc336)"); + } + + #[test] + fn test_manager_event_deserialisation_start() { + let result: ManagerEvent = + serde_json::from_str(r#"{"type": "start"}"#).expect("parse error"); + assert_eq!(result, ManagerEvent::Start); + } + + #[test] + fn test_manager_event_deserialisation_rxplease() { + let result: ManagerEvent = + serde_json::from_str(r#"{"type": "please", "side": "f91dcdaccc7cc336"}"#) + .expect("parse error"); + assert_eq!( + result, + ManagerEvent::RxPlease { + side: TheirSide::from("f91dcdaccc7cc336") + } + ); + } +} diff --git a/src/dilation/manager.rs b/src/dilation/manager.rs new file mode 100644 index 00000000..e3a5cbee --- /dev/null +++ b/src/dilation/manager.rs @@ -0,0 +1,270 @@ +use derive_more::Display; +#[cfg(test)] +use mockall::automock; + +use crate::{ + core::{MySide, TheirSide}, + dilation::api::ManagerCommand, + WormholeError, +}; + +use super::{api::ProtocolCommand, events::ManagerEvent}; + +#[derive(Debug, PartialEq, Display)] +pub enum Role { + Leader, + Follower, +} + +#[derive(Debug, PartialEq, Clone, Copy, Display)] +pub enum State { + Waiting, + Wanting, + Connecting, + Connected, + Abandoning, + Flushing, + Lonely, + Stopping, + Stopped, +} + +pub struct ManagerMachine { + pub side: MySide, + pub role: Role, + pub state: Option, +} + +#[cfg_attr(test, automock)] +impl ManagerMachine { + pub fn new(side: MySide) -> Self { + let machine = ManagerMachine { + side, + role: Role::Follower, + state: Some(State::Wanting), + }; + machine + } + + pub fn current_state(&self) -> Option { + self.state + } + + fn choose_role(&self, theirside: &TheirSide) -> Role { + let myside: TheirSide = self.side.clone().into(); + if myside > *theirside { + Role::Leader + } else { + Role::Follower + } + } + + pub fn process( + &mut self, + event: ManagerEvent, + side: &MySide, + command_handler: &mut dyn FnMut(ManagerCommand) -> Result<(), WormholeError>, + ) { + log::debug!( + "processing event: state={}, event={}", + self.state.unwrap(), + &event + ); + // given the event and the current state, generate output + // event and move to the new state + use State::*; + let mut command = None; + let current_state = self.state.unwrap(); + let new_state = match current_state { + Waiting => match event { + ManagerEvent::Start => { + command = Some(ManagerCommand::from(ProtocolCommand::SendPlease { + side: side.clone(), + })); + Wanting + }, + ManagerEvent::Stop => { + // actions.addAction(NotifyStopped) + Stopped + }, + _ => { + panic! {"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Wanting => match event { + ManagerEvent::RxPlease { side: their_side } => { + command = Some(ManagerCommand::from(ProtocolCommand::SendPlease { + side: side.clone(), + })); + let role = self.choose_role(&their_side.clone()); + log::debug!( + "role: {}", + if role == Role::Leader { + "leader" + } else { + "follower" + } + ); + self.role = role; + Connecting + }, + ManagerEvent::Stop => Stopped, + ManagerEvent::RxHints { hints: _ } => current_state, + _ => { + panic! 
{"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Connecting => match event { + ManagerEvent::RxHints { hints } => { + log::debug!("received connection hints: {:?}", hints); + // TODO store the other side's hints + current_state + }, + ManagerEvent::Stop => Stopped, + ManagerEvent::ConnectionMade => Connected, + ManagerEvent::RxReconnect => current_state, + _ => { + panic! {"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Connected => match event { + ManagerEvent::RxReconnect => Abandoning, + ManagerEvent::RxHints { hints: _ } => current_state, + ManagerEvent::ConnectionLostFollower => Lonely, + ManagerEvent::ConnectionLostLeader => Flushing, + ManagerEvent::Stop => Stopped, + _ => { + panic! {"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Abandoning => match event { + ManagerEvent::RxHints { hints: _ } => current_state, + ManagerEvent::ConnectionLostFollower => Connecting, + ManagerEvent::Stop => Stopped, + _ => { + panic! {"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Flushing => match event { + ManagerEvent::RxReconnecting => Connecting, + ManagerEvent::Stop => Stopped, + ManagerEvent::RxHints { hints: _ } => current_state, + _ => { + panic! {"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Lonely => match event { + ManagerEvent::RxReconnect => Connecting, + ManagerEvent::Stop => Stopped, + ManagerEvent::RxHints { hints: _ } => current_state, + _ => { + panic! {"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Stopping => match event { + ManagerEvent::RxHints { hints: _ } => current_state, + ManagerEvent::ConnectionLostFollower => Stopped, + ManagerEvent::ConnectionLostLeader => Stopped, + _ => { + panic! 
{"unexpected event {:?} for state {:?}", current_state, event} + }, + }, + Stopped => current_state, + }; + + let command_result = match command.clone() { + Some(command) => command_handler(command), + None => Ok(()), + }; + + match command_result { + Ok(_result) => { + self.state = Some(new_state); + log::debug!( + "processing event finished: state={}, command={}", + self.state.unwrap(), + command + .clone() + .map(|cmd| cmd.to_string()) + .unwrap_or("n/a".to_string()) + ); + }, + Err(wormhole_error) => { + panic!("processing event errored: {}", wormhole_error); + }, + }; + } + + pub(crate) fn is_done(&self) -> bool { + self.state == Option::from(State::Stopped) + } +} + +#[cfg(test)] +mod test { + use crate::core::{MySide, TheirSide}; + + use super::*; + + struct TestHandler { + command: Option, + } + + impl TestHandler { + fn new() -> Self { + TestHandler { command: None } + } + + fn handle_command(&mut self, command: ManagerCommand) -> Result<(), WormholeError> { + self.command = Some(command); + Ok(()) + } + } + + #[test] + fn test_manager_machine() { + // Sends Start event during construction: + let mut manager_fsm = + ManagerMachine::new(MySide::unchecked_from_string("test123".to_string())); + let side = MySide::generate(8); + + assert_eq!(manager_fsm.current_state(), Some(State::Wanting)); + assert_eq!(manager_fsm.is_done(), false); + + let mut handler = TestHandler::new(); + + // generate an input Event and see if we get the desired state and output Actions + manager_fsm.process( + ManagerEvent::RxPlease { + side: TheirSide::from("test"), + }, + &side, + &mut |cmd| handler.handle_command(cmd), + ); + + assert_eq!(manager_fsm.current_state(), Some(State::Connecting)); + assert_eq!( + handler.command, + Some(ManagerCommand::Protocol(ProtocolCommand::SendPlease { + side: side, + })) + ) + } + + #[test] + #[should_panic(expected = "Protocol error: foo")] + fn test_manager_machine_handle_error() { + let side = MySide::generate(8); + let mut manager_fsm = ManagerMachine { + side: side.clone(), + role: Role::Follower, + state: Some(State::Waiting), + }; + + assert_eq!(manager_fsm.current_state(), Some(State::Waiting)); + + manager_fsm.process(ManagerEvent::Start, &side, &mut |cmd| { + Err(WormholeError::Protocol("foo".into())) + }); + } +} diff --git a/src/dilation/mod.rs b/src/dilation/mod.rs new file mode 100644 index 00000000..c96763e8 --- /dev/null +++ b/src/dilation/mod.rs @@ -0,0 +1,421 @@ +use std::{cell::RefCell, rc::Rc}; + +use futures::executor; + +use crate::{ + core::{MySide, Phase}, + dilation::api::{ManagerCommand, ProtocolCommand}, + Wormhole, WormholeError, +}; + +#[cfg(test)] +use crate::core::protocol::MockWormholeProtocol; + +#[mockall_double::double] +use crate::dilation::manager::ManagerMachine; + +mod api; +mod events; +mod manager; + +#[mockall_double::double] +type WormholeConnection = WormholeConnectionDefault; + +pub struct WormholeConnectionDefault { + wormhole: Rc>, +} + +#[cfg_attr(test, mockall::automock)] +impl WormholeConnectionDefault { + fn new(wormhole: Wormhole) -> Self { + Self { + wormhole: Rc::new(RefCell::new(wormhole)), + } + } + + async fn receive_json(&self) -> Result + where + T: for<'a> serde::Deserialize<'a> + 'static, + { + let message = self.wormhole.borrow_mut().receive_json().await; + match message { + Ok(result) => match result { + Ok(result) => Ok(result), + Err(error) => Err(WormholeError::ProtocolJson(error)), + }, + Err(error) => Err(error), + } + } + + async fn send_json(&self, command: &ProtocolCommand) -> Result<(), WormholeError> 
{ + self.wormhole + .borrow_mut() + .send_json_with_phase(command, Phase::dilation) + .await + } +} + +pub struct DilatedWormhole { + wormhole: WormholeConnection, + side: MySide, + manager: ManagerMachine, +} + +impl DilatedWormhole { + pub fn new(wormhole: Wormhole, side: MySide) -> Self { + DilatedWormhole { + wormhole: WormholeConnection::new(wormhole), + side: side.clone(), + manager: ManagerMachine::new(side.clone()), + } + } + + pub async fn run(&mut self) { + log::info!( + "start state machine: state={}", + &self.manager.current_state().unwrap() + ); + + let mut command_handler = |cmd| Self::execute_command(&self.wormhole, cmd); + + loop { + log::debug!("wait for next event"); + let event_result = self.wormhole.receive_json().await; + + match event_result { + Ok(manager_event) => { + log::debug!("received event"); + self.manager + .process(manager_event, &self.side, &mut command_handler) + }, + Err(error) => { + log::warn!("received error {}", error); + continue; + }, + }; + + if self.manager.is_done() { + log::debug!("exiting"); + break; + } + } + } + + fn execute_command( + wormhole: &WormholeConnection, + command: ManagerCommand, + ) -> Result<(), WormholeError> { + log::debug!("execute_command"); + match command { + ManagerCommand::Protocol(protocol_command) => { + log::debug!(" command: {}", protocol_command); + executor::block_on(wormhole.send_json(&protocol_command)) + }, + ManagerCommand::IO(io_command) => { + println!("io command: {}", io_command); + Ok(()) + }, + } + } +} + +#[cfg(test)] +mod test { + use crate::{ + core::test::init_logger, + dilation::{ + api::{IOCommand, ProtocolCommand}, + events::ManagerEvent, + manager::{MockManagerMachine, State}, + }, + }; + use std::sync::{Arc, Mutex}; + + use super::*; + + use mockall::predicate::{always, eq}; + + #[async_std::test] + async fn test_wormhole_connection_send() { + let mut protocol = MockWormholeProtocol::default(); + let command = ProtocolCommand::SendPlease { + side: MySide::generate(2), + }; + + let serialized_bytes = serde_json::to_vec(&command).unwrap(); + + protocol + .expect_send_with_phase() + .withf(move |bytes, provider| { + bytes == &serialized_bytes && provider(0) == Phase::dilation(0) + }) + .return_once(|_, _| Ok(())); + + let connection = WormholeConnectionDefault::new(Wormhole::new(Box::new(protocol))); + + let result = connection.send_json(&command).await; + + assert!(result.is_ok()) + } + + #[async_std::test] + async fn test_wormhole_connection_send_error() { + let mut protocol = MockWormholeProtocol::default(); + let command = ProtocolCommand::SendPlease { + side: MySide::generate(2), + }; + + protocol + .expect_send_with_phase() + .return_once(|_, _| Err(WormholeError::Protocol(Box::from("foo")))); + + let connection = WormholeConnectionDefault::new(Wormhole::new(Box::new(protocol))); + + let result = connection.send_json(&command).await; + + assert!(result.is_err()) + } + + #[async_std::test] + async fn test_wormhole_connection_receive() { + let mut protocol = MockWormholeProtocol::default(); + + let serialized_bytes = r#"{"type": "start"}"#.as_bytes().to_vec(); + + protocol + .expect_receive() + .return_once(|| Ok(serialized_bytes)); + + let connection = WormholeConnectionDefault::new(Wormhole::new(Box::new(protocol))); + + let result = connection.receive_json::().await; + + assert!(result.is_ok()) + } + + #[async_std::test] + async fn test_wormhole_connection_receive_error() { + let mut protocol = MockWormholeProtocol::default(); + + protocol + .expect_receive() + .return_once(|| 
Err(WormholeError::Protocol(Box::from("foo")))); + + let connection = WormholeConnectionDefault::new(Wormhole::new(Box::new(protocol))); + + let result = connection.receive_json::().await; + + assert!(result.is_err()) + } + + #[async_std::test] + async fn test_wormhole_connection_receive_deserialization_error() { + let mut protocol = MockWormholeProtocol::default(); + + let serialized_bytes = r#"{"type": "foo"}"#.as_bytes().to_vec(); + + protocol + .expect_receive() + .return_once(|| Ok(serialized_bytes)); + + let connection = WormholeConnectionDefault::new(Wormhole::new(Box::new(protocol))); + + let result = connection.receive_json::().await; + + assert!(result.is_err()) + } + + #[async_std::test] + async fn test_dilated_wormhole_new() { + let protocol = MockWormholeProtocol::default(); + + let wc_ctx = MockWormholeConnectionDefault::new_context(); + wc_ctx + .expect() + .with(always()) + .return_once(move |_| WormholeConnection::default()); + + let mm_ctx = MockManagerMachine::new_context(); + mm_ctx + .expect() + .with(always()) + .return_once(move |_| ManagerMachine::default()); + } + + #[async_std::test] + async fn test_dilated_wormhole() { + init_logger(); + + let mut manager = ManagerMachine::default(); + let mut wormhole = WormholeConnection::default(); + + let my_side = MySide::generate(23); + + manager + .expect_current_state() + .return_once(|| Some(State::Wanting)); + + wormhole + .expect_receive_json() + .return_once(|| Ok(ManagerEvent::Start)); + + manager + .expect_process() + .with(eq(ManagerEvent::Start), eq(my_side.clone()), always()) + .times(1) + .return_once(|_, _, _| ()); + + manager.expect_is_done().return_once(|| true); + + let mut dilated_wormhole = DilatedWormhole { + manager, + side: my_side, + wormhole, + }; + + dilated_wormhole.run().await; + } + + #[async_std::test] + async fn test_dilated_wormhole_receving_error() { + init_logger(); + + let mut manager = ManagerMachine::default(); + let mut wormhole = WormholeConnection::default(); + + let my_side = MySide::generate(23); + + manager + .expect_current_state() + .return_once(|| Some(State::Wanting)); + + let mut events = vec![Ok(ManagerEvent::Start), Err(WormholeError::DilationVersion)]; + wormhole + .expect_receive_json() + .returning(move || events.pop().unwrap()); + + manager + .expect_process() + .with(eq(ManagerEvent::Start), eq(my_side.clone()), always()) + .times(1) + .return_once(|_, _, _| ()); + + manager.expect_is_done().return_once(|| true); + + let mut dilated_wormhole = DilatedWormhole { + manager, + side: my_side, + wormhole, + }; + + dilated_wormhole.run().await; + } + + #[async_std::test] + async fn test_dilated_wormhole_two_iterations() { + init_logger(); + + let mut manager = ManagerMachine::default(); + let mut wormhole = WormholeConnection::default(); + + let my_side = MySide::generate(23); + + manager + .expect_current_state() + .return_once(|| Some(State::Wanting)); + + let mut events = vec![Ok(ManagerEvent::Stop), Ok(ManagerEvent::Start)]; + wormhole + .expect_receive_json() + .times(2) + .returning(move || events.pop().unwrap()); + + let verify_events = Arc::new(Mutex::new(vec![ManagerEvent::Stop, ManagerEvent::Start])); + let verify_my_side = my_side.clone(); + manager + .expect_process() + .withf(move |event, side, _| { + *event == verify_events.lock().unwrap().pop().unwrap() && side == &verify_my_side + }) + .times(2) + .returning(|_, _, _| ()); + + let mut returns = vec![true, false]; + manager + .expect_is_done() + .returning(move || returns.pop().unwrap()); + + let mut 
dilated_wormhole = DilatedWormhole { + manager, + side: my_side.clone(), + wormhole, + }; + + dilated_wormhole.run().await; + } + + #[test] + fn test_dilated_wormhole_execute_protocol_command() { + init_logger(); + + let mut wormhole = WormholeConnection::default(); + + let protocol_command = ProtocolCommand::SendPlease { + side: MySide::generate(2), + }; + + wormhole + .expect_send_json() + .with(eq(protocol_command.clone())) + .return_once(|_| Ok(())) + .times(1); + + let result = DilatedWormhole::execute_command( + &mut wormhole, + ManagerCommand::Protocol(protocol_command), + ); + + assert!(result.is_ok()) + } + + #[test] + fn test_dilated_wormhole_execute_protocol_command_failure() { + init_logger(); + + let mut wormhole = WormholeConnection::default(); + + let protocol_command = ProtocolCommand::SendPlease { + side: MySide::generate(2), + }; + + let protocol_command_ref = protocol_command.clone(); + wormhole + .expect_send_json() + .with(eq(protocol_command_ref)) + .return_once(|_| Err(WormholeError::Crypto)) + .times(1); + + let result = DilatedWormhole::execute_command( + &mut wormhole, + ManagerCommand::Protocol(protocol_command.clone()), + ); + + assert!(result.is_err()) + } + + #[test] + fn test_dilated_wormhole_execute_io_command() { + init_logger(); + + let mut wormhole = WormholeConnection::default(); + + wormhole.expect_send_json().times(0); + + let result = DilatedWormhole::execute_command( + &mut wormhole, + ManagerCommand::IO(IOCommand::CloseConnection), + ); + + assert!(result.is_ok()) + } +} diff --git a/src/forwarding.rs b/src/forwarding.rs index 55dc45cf..8c055fd2 100644 --- a/src/forwarding.rs +++ b/src/forwarding.rs @@ -13,18 +13,21 @@ //! and received as they come in, no additional buffering is applied. (Under the assumption that those applications //! that need buffering already do it on their side, and those who don't, don't.) -use super::*; -use async_std::net::{TcpListener, TcpStream}; -use futures::{AsyncReadExt, AsyncWriteExt, Future, SinkExt, StreamExt, TryStreamExt}; -use serde::{Deserialize, Serialize}; use std::{ borrow::Cow, collections::{HashMap, HashSet}, rc::Rc, sync::Arc, }; + +use async_std::net::{TcpListener, TcpStream}; +use futures::{AsyncReadExt, AsyncWriteExt, Future, SinkExt, StreamExt, TryStreamExt}; +use serde::{Deserialize, Serialize}; + use transit::{TransitConnectError, TransitError}; +use super::*; + const APPID_RAW: &str = "piegames.de/wormhole/port-forwarding"; /// The App ID associated with this protocol. 
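A minimal usage sketch of the dilation API introduced above (illustration only, not part of the patch): `DilatedWormhole::new` takes an established `Wormhole` plus a `MySide`, and `run()` drives the manager loop. The side length and how the real entry point constructs this are assumptions here, mirroring the tests.

async fn run_dilation(wormhole: Wormhole) {
    // Arbitrary side length, as in the tests above.
    let side = MySide::generate(8);
    let mut dilated = DilatedWormhole::new(wormhole, side);
    // Loops on receive_json(), feeding each ManagerEvent into the ManagerMachine
    // until is_done() reports completion.
    dilated.run().await;
}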
@@ -41,6 +44,7 @@ pub const APP_CONFIG: crate::AppConfig = crate::AppConfig::, targets: Vec<(Option, u16)>, cancel: impl Future, ) -> Result<(), ForwardingError> { - let our_version: &AppVersion = wormhole - .our_version - .downcast_ref() - .expect("You may only use a Wormhole instance with the correct AppVersion type!"); - let peer_version: AppVersion = serde_json::from_value(wormhole.peer_version.clone())?; + let our_version = wormhole + .our_version() + .downcast_ref::() + .expect("You may only use a Wormhole instance with the correct AppVersion type!") + .to_owned(); + let peer_version: AppVersion = serde_json::from_value(wormhole.peer_version().to_owned())?; let connector = transit::init( - our_version.transit_abilities, - Some(peer_version.transit_abilities), + our_version.transit_abilities.clone(), + Some(peer_version.transit_abilities.clone()), relay_hints, ) .await?; - /* Send our transit hints */ wormhole .send_json(&PeerMessage::Transit { @@ -167,7 +171,7 @@ pub async fn serve( log::warn!("It seems like you are trying to forward a remote HTTP target ('{}'). Due to HTTP being host-aware this will very likely fail!", host); } (format!("{}:{}", host, port), (Some(host), port)) - }, + } None => (port.to_string(), (host, port)), }) .collect(); @@ -190,7 +194,7 @@ pub async fn serve( }, }; - let (mut transit, info, addr) = match connector + let (mut transit, info) = match connector .leader_connect( wormhole.key().derive_transit_key(wormhole.appid()), peer_version.transit_abilities, @@ -207,7 +211,7 @@ pub async fn serve( return Err(error); }, }; - transit_handler(info, addr); + transit_handler(info); /* We got a transit, now close the Wormhole */ wormhole.close().await?; @@ -518,16 +522,16 @@ impl ForwardingServe { /// no more than 1024 ports may be forwarded at once. 
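Note the API change in `serve` above (and `connect` below): the transit handler now receives only a `transit::TransitInfo`; the separate `std::net::SocketAddr` argument is gone. A sketch of a handler written against the new signature (not part of the patch; the module path and whether peer-address details are exposed on `TransitInfo` are assumptions):

// before: fn transit_handler(info: transit::TransitInfo, addr: std::net::SocketAddr) { ... }
// after:
fn transit_handler(info: magic_wormhole::transit::TransitInfo) {
    // Inspect connection details via `info` here if needed; the peer address
    // is no longer passed as a separate parameter.
    let _ = info;
    log::debug!("transit connection established");
}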
pub async fn connect( mut wormhole: Wormhole, - transit_handler: impl FnOnce(transit::TransitInfo, std::net::SocketAddr), + transit_handler: impl FnOnce(transit::TransitInfo), relay_hints: Vec, bind_address: Option, custom_ports: &[u16], ) -> Result { let our_version: &AppVersion = wormhole - .our_version + .our_version() .downcast_ref() .expect("You may only use a Wormhole instance with the correct AppVersion type!"); - let peer_version: AppVersion = serde_json::from_value(wormhole.peer_version.clone())?; + let peer_version: AppVersion = serde_json::from_value(wormhole.peer_version().to_owned())?; let connector = transit::init( our_version.transit_abilities, Some(peer_version.transit_abilities), @@ -561,7 +565,7 @@ pub async fn connect( }, }; - let (mut transit, info, addr) = match connector + let (mut transit, info) = match connector .follower_connect( wormhole.key().derive_transit_key(wormhole.appid()), peer_version.transit_abilities, @@ -578,7 +582,7 @@ pub async fn connect( return Err(error); }, }; - transit_handler(info, addr); + transit_handler(info); /* We got a transit, now close the Wormhole */ wormhole.close().await?; diff --git a/src/lib.rs b/src/lib.rs index 6b779fca..71441d51 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -26,6 +26,10 @@ #[macro_use] mod util; mod core; +#[cfg(feature = "dilation")] +pub mod dilated_transfer; +#[cfg(feature = "dilation")] +pub mod dilation; #[cfg(feature = "forwarding")] pub mod forwarding; #[cfg(feature = "transfer")] @@ -37,7 +41,10 @@ pub mod uri; pub use crate::core::{ key::{GenericKey, Key, KeyPurpose, WormholeKey}, - rendezvous, wordlist::PgpWordList, - AppConfig, AppID, Code, Wormhole, WormholeError, WormholeWelcome, + rendezvous, AppConfig, AppID, Code, MailboxConnection, Mood, Nameplate, Wormhole, + WormholeError, }; + +#[cfg(feature = "dilation")] +pub use crate::dilation::DilatedWormhole; diff --git a/src/transfer.rs b/src/transfer.rs index 08e620ff..39c38514 100644 --- a/src/transfer.rs +++ b/src/transfer.rs @@ -8,24 +8,32 @@ //! At its core, "peer messages" are exchanged over an established wormhole connection with the other side. //! They are used to set up a [transit] portal and to exchange a file offer/accept. Then, the file is transmitted over the transit relay. -use futures::{AsyncRead, AsyncWrite}; +use futures::{AsyncRead, AsyncSeek, AsyncWrite}; use serde_derive::{Deserialize, Serialize}; #[cfg(test)] use serde_json::json; use std::sync::Arc; -use super::{core::WormholeError, transit, transit::Transit, AppID, Wormhole}; +use super::{core::WormholeError, transit, AppID, Wormhole}; use futures::Future; use log::*; -use std::{borrow::Cow, path::PathBuf}; -use transit::{TransitConnectError, TransitConnector, TransitError}; +use std::{ + borrow::Cow, + collections::BTreeMap, + path::{Path, PathBuf}, +}; +use transit::{ + Abilities as TransitAbilities, Transit, TransitConnectError, TransitConnector, TransitError, +}; -mod messages; -use messages::*; +mod cancel; mod v1; mod v2; -const APPID_RAW: &str = "lothar.com/wormhole/text-or-file-xfer"; +pub use v1::ReceiveRequest as ReceiveRequestV1; +pub use v2::ReceiveRequest as ReceiveRequestV2; + +use crate::core::APPID_RAW; /// The App ID associated with this protocol. 
pub const APPID: AppID = AppID(Cow::Borrowed(APPID_RAW)); @@ -38,6 +46,7 @@ pub const APP_CONFIG: crate::AppConfig = crate::AppConfig::), #[error( - "Unexpected message (protocol error): Expected '{}', but got: {:?}", + "Unexpected message (protocol error): Expected '{}', but got: '{}'", _0, _1 )] - ProtocolUnexpectedMessage(Box, Box), + ProtocolUnexpectedMessage(Box, Box), #[error("Wormhole connection error")] Wormhole( #[from] @@ -111,40 +120,39 @@ pub enum TransferError { impl TransferError { pub(self) fn unexpected_message( expected: impl Into>, - got: impl std::fmt::Debug + Send + Sync + 'static, + got: impl std::fmt::Display, ) -> Self { - Self::ProtocolUnexpectedMessage(expected.into(), Box::new(got)) + Self::ProtocolUnexpectedMessage(expected.into(), got.to_string().into()) } } /** * The application specific version information for this protocol. - * - * At the moment, this always is an empty object, but this will likely change in the future. */ #[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "kebab-case")] pub struct AppVersion { - // #[serde(default)] - // abilities: Cow<'static, [Cow<'static, str>]>, - // #[serde(default)] - // transfer_v2: Option, + #[serde(default)] + abilities: Cow<'static, [Cow<'static, str>]>, + #[serde(default)] + transfer_v2: Option, } // TODO check invariants during deserialization - impl AppVersion { const fn new() -> Self { Self { - // abilities: Cow::Borrowed([Cow::Borrowed("transfer-v1"), Cow::Borrowed("transfer-v2")]), - // transfer_v2: Some(AppVersionTransferV2Hint::new()) + // Dont advertize v2 for now + abilities: Cow::Borrowed(&[ + Cow::Borrowed("transfer-v1"), /* Cow::Borrowed("transfer-v2") */ + ]), + transfer_v2: Some(AppVersionTransferV2Hint::new()), } } #[allow(dead_code)] fn supports_v2(&self) -> bool { - false - // self.abilities.contains(&"transfer-v2".into()) + self.abilities.contains(&"transfer-v2".into()) } } @@ -158,14 +166,14 @@ impl Default for AppVersion { // #[serde(rename_all = "kebab-case")] // pub struct AppVersionTransferV2Hint { // supported_formats: Vec>, -// transit_abilities: Vec, +// transit_abilities: Vec, // } // impl AppVersionTransferV2Hint { // const fn new() -> Self { // Self { // supported_formats: vec![Cow::Borrowed("tar.zst")], -// transit_abilities: transit::Ability::all_abilities(), +// transit_abilities: Ability::all_abilities(), // } // } // } @@ -176,156 +184,678 @@ impl Default for AppVersion { // } // } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "kebab-case")] -struct TransitAck { - pub ack: String, - pub sha256: String, +pub struct AppVersionTransferV2Hint { + supported_formats: Cow<'static, [Cow<'static, str>]>, + transit_abilities: transit::Abilities, } -impl TransitAck { - pub fn new(msg: impl Into, sha256: impl Into) -> Self { - TransitAck { - ack: msg.into(), - sha256: sha256.into(), +impl AppVersionTransferV2Hint { + const fn new() -> Self { + Self { + supported_formats: Cow::Borrowed(&[Cow::Borrowed("plain"), Cow::Borrowed("tar")]), + transit_abilities: transit::Abilities::ALL_ABILITIES, } } +} + +impl Default for AppVersionTransferV2Hint { + fn default() -> Self { + Self::new() + } +} + +pub trait AsyncReadSeek: AsyncRead + AsyncSeek {} + +impl AsyncReadSeek for T where T: AsyncRead + AsyncSeek {} + +pub trait AsyncWriteSeek: AsyncWrite + AsyncSeek {} + +impl AsyncWriteSeek for T where T: AsyncWrite + AsyncSeek {} + +/** + * The type of message exchanged over the wormhole for this protocol + */ 
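The `AsyncReadSeek`/`AsyncWriteSeek` marker traits above exist because a trait object may only name one non-auto trait, so `Box<dyn AsyncRead + AsyncSeek>` is not valid Rust; the blanket impls let any type implementing both be coerced to the combined trait. A minimal sketch (not part of the patch):

use futures::io::Cursor;

// An in-memory buffer implements both AsyncRead and AsyncSeek, so the blanket
// impl makes it usable behind the combined trait object.
fn as_read_seek(buf: Vec<u8>) -> Box<dyn AsyncReadSeek + Unpin + Send> {
    Box::new(Cursor::new(buf))
}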
+#[derive(Deserialize, Serialize, derive_more::Display, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +#[non_exhaustive] +pub enum PeerMessage { + /* V1 */ + #[display(fmt = "transit")] + Transit(v1::TransitV1), + #[display(fmt = "offer")] + Offer(v1::OfferMessage), + #[display(fmt = "answer")] + Answer(v1::AnswerMessage), + /* V2 */ + #[display(fmt = "transit-v2")] + TransitV2(v2::TransitV2), + + /** Tell the other side you got an error */ + #[display(fmt = "error")] + Error(String), + #[display(fmt = "unknown")] + #[serde(other)] + Unknown, +} + +impl PeerMessage { + #[allow(unused)] + fn offer_message_v1(msg: impl Into) -> Self { + PeerMessage::Offer(v1::OfferMessage::Message(msg.into())) + } - #[cfg(test)] - pub fn serialize(&self) -> String { - json!(self).to_string() + fn offer_file_v1(name: impl Into, size: u64) -> Self { + PeerMessage::Offer(v1::OfferMessage::File { + filename: name.into(), + filesize: size, + }) + } + + #[allow(dead_code)] + fn offer_directory_v1( + name: impl Into, + mode: impl Into, + compressed_size: u64, + numbytes: u64, + numfiles: u64, + ) -> Self { + PeerMessage::Offer(v1::OfferMessage::Directory { + dirname: name.into(), + mode: mode.into(), + zipsize: compressed_size, + numbytes, + numfiles, + }) } - pub fn serialize_vec(&self) -> Vec { + #[allow(dead_code)] + fn message_ack_v1(msg: impl Into) -> Self { + PeerMessage::Answer(v1::AnswerMessage::MessageAck(msg.into())) + } + + fn file_ack_v1(msg: impl Into) -> Self { + PeerMessage::Answer(v1::AnswerMessage::FileAck(msg.into())) + } + + fn error_message(msg: impl Into) -> Self { + PeerMessage::Error(msg.into()) + } + + fn transit_v1(abilities: TransitAbilities, hints: transit::Hints) -> Self { + PeerMessage::Transit(v1::TransitV1 { + abilities_v1: abilities, + hints_v1: hints, + }) + } + + fn transit_v2(hints_v2: transit::Hints) -> Self { + PeerMessage::TransitV2(v2::TransitV2 { hints_v2 }) + } + + fn check_err(&self) -> Result { + match self { + Self::Error(err) => Err(TransferError::PeerError(err.clone())), + other => Ok(other.clone()), + } + } + + #[allow(dead_code)] + fn ser_json(&self) -> Vec { serde_json::to_vec(self).unwrap() } } -pub async fn send_file_or_folder( +pub type OfferSend = Offer; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(bound(deserialize = "T: Default"))] +pub struct Offer { + content: BTreeMap>, +} + +impl OfferSend { + /// Offer a single path (file or folder) + #[cfg(not(target_family = "wasm"))] + pub async fn new_file_or_folder( + offer_name: String, + path: impl AsRef, + ) -> std::io::Result { + let path = path.as_ref(); + log::trace!( + "OfferSend::new_file_or_folder: {offer_name}, {}", + path.display() + ); + let mut content = BTreeMap::new(); + content.insert(offer_name, OfferSendEntry::new(path).await?); + Ok(Self { content }) + } + + /// Offer list of paths (files and folders) + /// Panics if any of the paths does not have a name (like `/`). + /// Panics if any two or more of the paths have the same name. + #[cfg(not(target_family = "wasm"))] + pub async fn new_paths(paths: impl IntoIterator) -> std::io::Result { + let mut content = BTreeMap::new(); + for path in paths { + let offer_name = path.file_name().expect("Path must have a name"); + let offer_name = offer_name + .to_str() + .ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "{} is not UTF-8 encoded", + (offer_name.as_ref() as &Path).display() + ), + ) + })? 
+ .to_owned(); + let old = content.insert(offer_name, OfferSendEntry::new(path).await?); + assert!(old.is_none(), "Duplicate names found"); + } + Ok(Self { content }) + } + + /// Offer a single file with custom content + /// + /// You must ensure that the Reader contains exactly as many bytes + /// as advertized in file_size. + pub fn new_file_custom(offer_name: String, size: u64, content: OfferContent) -> Self { + let mut content_ = BTreeMap::new(); + content_.insert(offer_name, OfferSendEntry::RegularFile { size, content }); + Self { content: content_ } + } +} + +impl Offer { + pub fn top_level_paths(&self) -> impl Iterator + '_ { + self.content.keys() + } + + pub fn get(&self, path: &[String]) -> Option<&OfferEntry> { + match path { + [] => None, + [start, rest @ ..] => self.content.get(start).and_then(|inner| inner.get(rest)), + } + } + + pub fn get_file(&self, path: &[String]) -> Option<(&T, u64)> { + match path { + [] => None, + [start, rest @ ..] => self + .content + .get(start) + .and_then(|inner| inner.get_file(rest)), + } + } + + /** Recursively list all file paths, without directory names or symlinks. */ + pub fn iter_file_paths(&self) -> impl Iterator> + '_ { + self.iter_files().map(|val| val.0) + } + + /** Recursively list all files, without directory names or symlinks. */ + pub fn iter_files(&self) -> impl Iterator, &T, u64)> + '_ { + self.content.iter().flat_map(|(name, offer)| { + let name = name.clone(); + offer.iter_files().map(move |mut val| { + val.0.insert(0, name.clone()); + val + }) + }) + } + + pub fn total_size(&self) -> u64 { + self.iter_files().map(|v| v.2).sum() + } + + #[cfg(not(target_family = "wasm"))] + pub fn accept_all(&self, target_dir: &Path) -> OfferAccept { + self.set_content(|path| { + let full_path: PathBuf = target_dir.join(path.join("/")); + let content = new_accept_content(move |append| { + let full_path = full_path.clone(); + async_std::fs::OpenOptions::new() + .write(true) + .create(true) + .append(append) + .truncate(!append) + .open(full_path) + }); + AcceptInner { + content: Box::new(content) as _, + offset: 0, + sha256: None, + } + }) + } + + #[cfg(not(target_family = "wasm"))] + pub async fn create_directories(&self, target_path: &Path) -> std::io::Result<()> { + // TODO this could be made more efficient by passing around just one buffer + for (name, file) in &self.content { + file.create_directories(&target_path.join(name)).await?; + } + Ok(()) + } + + // #[cfg(not(target_family = "wasm"))] + // pub async fn create_symlinks(&self, target_path: &Path) -> std::io::Result<()> { + // // TODO this could be made more efficient by passing around just one buffer + // for (name, file) in &self.content { + // file.create_symlinks(&target_path.join(name)).await?; + // } + // Ok(()) + // } + + pub fn offer_name(&self) -> String { + let (name, entry) = self.content.iter().next().unwrap(); + if self.is_multiple() { + format!( + "{name} and {} other files or directories", + self.content.len() - 1 + ) + } else { + if self.is_directory() { + let count = entry.iter_files().count(); + format!("{name} with {count} files inside") + } else { + name.clone() + } + } + } + + pub fn is_multiple(&self) -> bool { + self.content.len() > 1 + } + + pub fn is_directory(&self) -> bool { + self.is_multiple() + || self + .content + .values() + .any(|f| matches!(f, OfferEntry::Directory { .. 
})) + } + + pub fn set_content(&self, mut f: impl FnMut(&[String]) -> U) -> Offer { + Offer { + content: self + .content + .iter() + .map(|(k, v)| (k.clone(), v.set_content(&mut vec![k.clone()], &mut f))) + .collect(), + } + } +} + +impl Offer { + /** Recursively list all files, without directory names or symlinks. */ + pub fn into_iter_files(self) -> impl Iterator, T, u64)> + Send { + self.content.into_iter().flat_map(|(name, offer)| { + offer.into_iter_files().map(move |mut val| { + val.0.insert(0, name.clone()); + val + }) + }) + } +} + +impl From<&Offer> for Offer { + fn from(from: &Offer) -> Self { + from.set_content(|_| ()) + } +} + +/// The signature is basically just `() -> io::Result`, but in async +/// +/// This may be called multiple times during the send process, an imlementations that generate their +/// output dynamically must ensure all invocations produce the same result — independently of each other +/// (things may be concurrent). +pub type OfferContent = Box< + dyn Fn() -> futures::future::BoxFuture< + 'static, + std::io::Result>, + > + Send, +>; + +pub fn new_offer_content(content: F) -> OfferContent +where + F: Fn() -> G + Send + 'static, + G: Future> + Send + 'static, + H: AsyncReadSeek + Unpin + Send + 'static, +{ + let wrap_fun = move || { + use futures::TryFutureExt; + + let fut = content(); + let wrap_fut = fut.map_ok(|read| Box::new(read) as Box); + + Box::pin(wrap_fut) as futures::future::BoxFuture<'static, _> + }; + Box::new(wrap_fun) as _ +} + +pub type OfferSendEntry = OfferEntry; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "kebab-case")] +#[serde(tag = "type")] +#[serde(bound(deserialize = "T: Default"))] +pub enum OfferEntry { + RegularFile { + size: u64, + #[serde(skip)] + content: T, + }, + Directory { + content: BTreeMap, + }, + // Symlink { + // target: String, + // }, +} + +impl OfferSendEntry { + #[cfg(not(target_family = "wasm"))] + async fn new(path: impl AsRef) -> std::io::Result { + // Workaround for https://github.com/rust-lang/rust/issues/78649 + #[inline(always)] + fn new_recurse<'a>( + path: impl AsRef + 'a + Send, + ) -> futures::future::BoxFuture<'a, std::io::Result> { + Box::pin(OfferSendEntry::new(path)) + } + + let path = path.as_ref(); + // let metadata = async_std::fs::symlink_metadata(path).await?; + let metadata = async_std::fs::metadata(path).await?; + // let mtime = metadata.modified()? + // .duration_since(std::time::SystemTime::UNIX_EPOCH) + // .unwrap_or_default() + // .as_secs(); + if metadata.is_file() { + log::trace!("OfferSendEntry::new {path:?} is file"); + let path = path.to_owned(); + Ok(Self::RegularFile { + size: metadata.len(), + content: new_offer_content(move || { + let path = path.clone(); + async_std::fs::File::open(path) + }), + }) + // } else if metadata.is_symlink() { + // log::trace!("OfferSendEntry::new {path:?} is symlink"); + // let target = async_std::fs::read_link(path).await?; + // Ok(Self::Symlink { + // target: target + // .to_str() + // .ok_or_else(|| { + // std::io::Error::new( + // std::io::ErrorKind::Other, + // format!("{} is not UTF-8 encoded", target.display()), + // ) + // })? + // .to_string(), + // }) + } else if metadata.is_dir() { + use futures::TryStreamExt; + log::trace!("OfferSendEntry::new {path:?} is directory"); + + let content: BTreeMap = async_std::fs::read_dir(path) + .await? 
+ .and_then(|file| async move { + let path = file.path(); + let name = path + .file_name() + .expect("Internal error: non-root paths should always have a name") + .to_str() + .ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!("{} is not UTF-8 encoded", path.display()), + ) + })? + .to_owned(); + let offer = new_recurse(path).await?; + Ok((name, offer)) + }) + .try_collect() + .await?; + Ok(Self::Directory { content }) + } else { + unreachable!() + } + } +} + +impl OfferEntry { + /** Recursively list all files, without directory names or symlinks. */ + fn iter_files(&self) -> impl Iterator, &T, u64)> + '_ { + // TODO I couldn't think up a less efficient way to do this ^^ + match self { + Self::Directory { content, .. } => { + let iter = content.iter().flat_map(|(name, offer)| { + let name = name.clone(); + offer.iter_files().map(move |mut val| { + val.0.insert(0, name.clone()); + val + }) + }); + Box::new(iter) as Box> + }, + Self::RegularFile { content, size } => { + Box::new(std::iter::once((vec![], content, *size))) as Box> + }, + // Self::Symlink { .. } => Box::new(std::iter::empty()) as Box>, + } + } + + fn get(&self, path: &[String]) -> Option<&Self> { + match path { + [] => Some(self), + [start, rest @ ..] => match self { + Self::Directory { content, .. } => { + content.get(start).and_then(|inner| inner.get(rest)) + }, + _ => None, + }, + } + } + + fn get_file(&self, path: &[String]) -> Option<(&T, u64)> { + match path { + [] => match self { + Self::RegularFile { content, size } => Some((content, *size)), + _ => None, + }, + [start, rest @ ..] => match self { + Self::Directory { content, .. } => { + content.get(start).and_then(|inner| inner.get_file(rest)) + }, + _ => None, + }, + } + } + + #[cfg(not(target_family = "wasm"))] + async fn create_directories(&self, target_path: &Path) -> std::io::Result<()> { + #[inline(always)] + fn recurse<'a, T>( + this: &'a OfferEntry, + path: &'a Path, + ) -> futures::future::LocalBoxFuture<'a, std::io::Result<()>> { + Box::pin(OfferEntry::create_directories(this, path)) + } + match self { + Self::Directory { content, .. } => { + async_std::fs::create_dir(target_path).await?; + for (name, file) in content { + recurse(file, &target_path.join(name)).await?; + } + Ok(()) + }, + _ => Ok(()), + } + } + + // #[cfg(not(target_family = "wasm"))] + // async fn create_symlinks(&self, target_path: &Path) -> std::io::Result<()> { + // #[inline(always)] + // fn recurse<'a, T>( + // this: &'a OfferEntry, + // path: &'a Path, + // ) -> futures::future::LocalBoxFuture<'a, std::io::Result<()>> { + // Box::pin(OfferEntry::create_symlinks(this, path)) + // } + // match self { + // Self::Symlink { target } => { + // todo!() + // }, + // Self::Directory { content, .. } => { + // for (name, file) in content { + // recurse(file, &target_path.join(name)).await?; + // } + // Ok(()) + // }, + // _ => Ok(()), + // } + // } + + fn set_content( + &self, + base_path: &mut Vec, + f: &mut impl FnMut(&[String]) -> U, + ) -> OfferEntry { + match self { + OfferEntry::RegularFile { size, .. 
} => OfferEntry::RegularFile { + size: *size, + content: f(&base_path), + }, + OfferEntry::Directory { content } => OfferEntry::Directory { + content: content + .into_iter() + .map(|(k, v)| { + base_path.push(k.clone()); + let v = v.set_content(base_path, f); + base_path.pop(); + (k.clone(), v) + }) + .collect(), + }, + // OfferEntry::Symlink { target } => OfferEntry::Symlink { + // target: target.clone(), + // }, + } + } +} + +impl OfferEntry { + /** Recursively list all files, without directory names or symlinks. */ + fn into_iter_files(self) -> impl Iterator, T, u64)> + Send { + // TODO I couldn't think up a less efficient way to do this ^^ + match self { + Self::Directory { content, .. } => { + let iter = content.into_iter().flat_map(|(name, offer)| { + offer.into_iter_files().map(move |mut val| { + val.0.insert(0, name.clone()); + val + }) + }); + Box::new(iter) as Box + Send> + }, + Self::RegularFile { content, size } => { + Box::new(std::iter::once((vec![], content, size))) + as Box + Send> + }, + // Self::Symlink { .. } => { + // Box::new(std::iter::empty()) as Box + Send> + // }, + } + } +} + +impl From<&OfferEntry> for OfferEntry { + fn from(from: &OfferEntry) -> Self { + /* Note: this violates some invariants and only works because our mapper discards the path argument */ + from.set_content(&mut vec![], &mut |_| ()) + } +} + +/// The signature is basically just `bool -> io::Result`, but in async +/// +/// The boolean parameter dictates whether we start from scratch or not: +/// true: Append to existing files +/// false: Truncate if necessary +pub type AcceptContent = Box< + dyn FnOnce( + bool, + ) -> futures::future::BoxFuture< + 'static, + std::io::Result>, + > + Send, +>; + +pub fn new_accept_content(content: F) -> AcceptContent +where + F: Fn(bool) -> G + Send + 'static, + G: Future> + Send + 'static, + H: AsyncWrite + Unpin + Send + 'static, +{ + let wrap_fun = move |append| { + use futures::TryFutureExt; + + let fut = content(append); + let wrap_fut = fut.map_ok(|write| Box::new(write) as Box); + + Box::pin(wrap_fut) as futures::future::BoxFuture<'static, _> + }; + Box::new(wrap_fun) as _ +} + +pub type OfferAccept = Offer; + +pub struct AcceptInner { + pub offset: u64, + pub sha256: Option<[u8; 32]>, + pub content: AcceptContent, +} + +pub async fn send( wormhole: Wormhole, relay_hints: Vec, - file_path: N, - file_name: M, transit_abilities: transit::Abilities, - transit_handler: G, - progress_handler: H, + offer: OfferSend, + transit_handler: impl FnOnce(transit::TransitInfo), + progress_handler: impl FnMut(u64, u64) + 'static, cancel: impl Future, -) -> Result<(), TransferError> -where - N: AsRef, - M: AsRef, - G: FnOnce(transit::TransitInfo, std::net::SocketAddr), - H: FnMut(u64, u64) + 'static, -{ - use async_std::fs::File; - let file_path = file_path.as_ref(); - let file_name = file_name.as_ref(); - - let mut file = File::open(file_path).await?; - let metadata = file.metadata().await?; - if metadata.is_dir() { - send_folder( +) -> Result<(), TransferError> { + let peer_version: AppVersion = serde_json::from_value(wormhole.peer_version().clone())?; + if peer_version.supports_v2() { + v2::send( wormhole, relay_hints, - file_path, - file_name, transit_abilities, - transit_handler, + offer, progress_handler, + peer_version, cancel, ) - .await?; + .await } else { - let file_size = metadata.len(); - send_file( + v1::send( wormhole, relay_hints, - &mut file, - file_name, - file_size, transit_abilities, - transit_handler, + offer, progress_handler, + transit_handler, + 
peer_version, cancel, ) - .await?; + .await } - Ok(()) -} - -/// Send a file to the other side -/// -/// You must ensure that the Reader contains exactly as many bytes -/// as advertized in file_size. -pub async fn send_file( - wormhole: Wormhole, - relay_hints: Vec, - file: &mut F, - file_name: N, - file_size: u64, - transit_abilities: transit::Abilities, - transit_handler: G, - progress_handler: H, - cancel: impl Future, -) -> Result<(), TransferError> -where - F: AsyncRead + Unpin, - N: Into, - G: FnOnce(transit::TransitInfo, std::net::SocketAddr), - H: FnMut(u64, u64) + 'static, -{ - let _peer_version: AppVersion = serde_json::from_value(wormhole.peer_version.clone())?; - // if peer_version.supports_v2() && false { - // v2::send_file(wormhole, relay_url, file, file_name, file_size, progress_handler, peer_version).await - // } else { - // log::info!("TODO"); - v1::send_file( - wormhole, - relay_hints, - file, - file_name, - file_size, - transit_abilities, - transit_handler, - progress_handler, - cancel, - ) - .await - // } -} - -/// Send a folder to the other side -/// -/// This isn't a proper folder transfer as per the Wormhole protocol -/// because it sends it in a way so that the receiver still has to manually -/// unpack it. But it's better than nothing -pub async fn send_folder( - wormhole: Wormhole, - relay_hints: Vec, - folder_path: N, - folder_name: M, - transit_abilities: transit::Abilities, - transit_handler: G, - progress_handler: H, - cancel: impl Future, -) -> Result<(), TransferError> -where - N: Into, - M: Into, - G: FnOnce(transit::TransitInfo, std::net::SocketAddr), - H: FnMut(u64, u64) + 'static, -{ - v1::send_folder( - wormhole, - relay_hints, - folder_path, - folder_name, - transit_abilities, - transit_handler, - progress_handler, - cancel, - ) - .await } /** @@ -336,300 +866,119 @@ where * * Returns `None` if the task got cancelled. */ -pub async fn request_file( - mut wormhole: Wormhole, +pub async fn request( + wormhole: Wormhole, relay_hints: Vec, transit_abilities: transit::Abilities, cancel: impl Future, ) -> Result, TransferError> { - // Error handling - let run = Box::pin(async { - let connector = transit::init(transit_abilities, None, relay_hints).await?; - - // send the transit message - debug!("Sending transit message '{:?}", connector.our_hints()); - wormhole - .send_json(&PeerMessage::transit( - *connector.our_abilities(), - (**connector.our_hints()).clone(), - )) - .await?; - - // receive transit message - let (their_abilities, their_hints): (transit::Abilities, transit::Hints) = - match wormhole.receive_json().await?? { - PeerMessage::Transit(transit) => { - debug!("received transit message: {:?}", transit); - (transit.abilities_v1, transit.hints_v1) - }, - PeerMessage::Error(err) => { - bail!(TransferError::PeerError(err)); - }, - other => { - bail!(TransferError::unexpected_message("transit", other)); - }, - }; - - // 3. receive file offer message from peer - let (filename, filesize) = match wormhole.receive_json().await?? { - PeerMessage::Offer(offer_type) => match offer_type { - Offer::File { filename, filesize } => (filename, filesize), - Offer::Directory { - mut dirname, - zipsize, - .. 
- } => { - dirname.set_extension("zip"); - (dirname, zipsize) - }, - _ => bail!(TransferError::UnsupportedOffer), - }, - PeerMessage::Error(err) => { - bail!(TransferError::PeerError(err)); - }, - other => { - bail!(TransferError::unexpected_message("offer", other)); - }, - }; - - Ok((filename, filesize, connector, their_abilities, their_hints)) - }); - - match crate::util::cancellable(run, cancel).await { - Ok(Ok((filename, filesize, connector, their_abilities, their_hints))) => { - Ok(Some(ReceiveRequest { - wormhole, - filename, - filesize, - connector, - their_abilities, - their_hints: Arc::new(their_hints), - })) - }, - Ok(Err(error @ TransferError::PeerError(_))) => Err(error), - Ok(Err(error)) => { - let _ = wormhole - .send_json(&PeerMessage::Error(format!("{}", error))) - .await; - Err(error) - }, - Err(cancelled) => { - let _ = wormhole - .send_json(&PeerMessage::Error(format!("{}", cancelled))) - .await; - Ok(None) - }, + let peer_version: AppVersion = serde_json::from_value(wormhole.peer_version().clone())?; + if peer_version.supports_v2() { + v2::request( + wormhole, + relay_hints, + peer_version, + transit_abilities, + cancel, + ) + .await + .map(|req| req.map(ReceiveRequest::V2)) + } else { + v1::request(wormhole, relay_hints, transit_abilities, cancel) + .await + .map(|req| req.map(ReceiveRequest::V1)) } } /** * A pending files send offer from the other side * - * You *should* consume this object, either by calling [`accept`](ReceiveRequest::accept) or [`reject`](ReceiveRequest::reject). + * You *should* consume this object, by matching on the protocol version and then calling either `accept` or `reject`. */ #[must_use] -pub struct ReceiveRequest { - wormhole: Wormhole, - connector: TransitConnector, - /// **Security warning:** this is untrusted and unverified input - pub filename: PathBuf, - pub filesize: u64, - their_abilities: transit::Abilities, - their_hints: Arc, +pub enum ReceiveRequest { + V1(ReceiveRequestV1), + V2(ReceiveRequestV2), } -impl ReceiveRequest { - /** - * Accept the file offer - * - * This will transfer the file and save it on disk. - */ - pub async fn accept( - mut self, - transit_handler: G, - progress_handler: F, - content_handler: &mut W, - cancel: impl Future, - ) -> Result<(), TransferError> - where - F: FnMut(u64, u64) + 'static, - G: FnOnce(transit::TransitInfo, std::net::SocketAddr), - W: AsyncWrite + Unpin, - { - let run = Box::pin(async { - // send file ack. - debug!("Sending ack"); - self.wormhole - .send_json(&PeerMessage::file_ack("ok")) - .await?; - - let (mut transit, info, addr) = self - .connector - .follower_connect( - self.wormhole - .key() - .derive_transit_key(self.wormhole.appid()), - self.their_abilities, - self.their_hints.clone(), - ) - .await?; - transit_handler(info, addr); - - debug!("Beginning file transfer"); - v1::tcp_file_receive( - &mut transit, - self.filesize, - progress_handler, - content_handler, - ) - .await?; - Ok(()) - }); - - futures::pin_mut!(cancel); - let result = crate::util::cancellable_2(run, cancel).await; - handle_run_result(self.wormhole, result).await - } - - /** - * Reject the file offer - * - * This will send an error message to the other side so that it knows the transfer failed. 
- */ - pub async fn reject(mut self) -> Result<(), TransferError> { - self.wormhole - .send_json(&PeerMessage::error_message("transfer rejected")) - .await?; - self.wormhole.close().await?; +#[cfg(test)] +mod test { + use super::*; + use transit::{Abilities, DirectHint, RelayHint}; - Ok(()) + #[test] + fn test_transit() { + let abilities = Abilities::ALL_ABILITIES; + let hints = transit::Hints::new( + [DirectHint::new("192.168.1.8", 46295)], + [RelayHint::new( + None, + [DirectHint::new("magic-wormhole-transit.debian.net", 4001)], + [], + )], + ); + assert_eq!( + serde_json::json!(crate::transfer::PeerMessage::transit_v1(abilities, hints)), + serde_json::json!({ + "transit": { + "abilities-v1": [{"type":"direct-tcp-v1"},{"type":"relay-v1"}], + "hints-v1": [ + {"hostname":"192.168.1.8","port":46295,"type":"direct-tcp-v1"}, + { + "type": "relay-v1", + "hints": [ + {"type": "direct-tcp-v1", "hostname": "magic-wormhole-transit.debian.net", "port": 4001} + ], + "name": null + } + ], + } + }) + ); } -} -/// Maximum duration that we are willing to wait for cleanup tasks to finish -const SHUTDOWN_TIME: std::time::Duration = std::time::Duration::from_secs(5); + #[test] + fn test_message() { + let m1 = PeerMessage::offer_message_v1("hello from rust"); + assert_eq!( + serde_json::json!(m1).to_string(), + "{\"offer\":{\"message\":\"hello from rust\"}}" + ); + } -/** Handle the post-{transfer, failure, cancellation} logic */ -async fn handle_run_result( - mut wormhole: Wormhole, - result: Result<(Result<(), TransferError>, impl Future), crate::util::Cancelled>, -) -> Result<(), TransferError> { - async fn wrap_timeout(run: impl Future, cancel: impl Future) { - let run = async_std::future::timeout(SHUTDOWN_TIME, run); - futures::pin_mut!(run); - match crate::util::cancellable(run, cancel).await { - Ok(Ok(())) => {}, - Ok(Err(_timeout)) => log::debug!("Post-transfer timed out"), - Err(_cancelled) => log::debug!("Post-transfer got cancelled by user"), - }; - } - - /// Ignore an error but at least debug print it - fn debug_err(result: Result<(), WormholeError>, operation: &str) { - if let Err(error) = result { - log::debug!("Failed to {} after transfer: {}", operation, error); - } + #[test] + fn test_offer_file() { + let f1 = PeerMessage::offer_file_v1("somefile.txt", 34556); + assert_eq!( + serde_json::json!(f1).to_string(), + "{\"offer\":{\"file\":{\"filename\":\"somefile.txt\",\"filesize\":34556}}}" + ); } - match result { - /* Happy case: everything went okay */ - Ok((Ok(()), cancel)) => { - log::debug!("Transfer done, doing cleanup logic"); - wrap_timeout( - async { - debug_err(wormhole.close().await, "close Wormhole"); - }, - cancel, - ) - .await; - Ok(()) - }, - /* Got peer error: stop everything immediately */ - Ok((Err(error @ TransferError::PeerError(_)), cancel)) => { - log::debug!( - "Transfer encountered an error ({}), doing cleanup logic", - error - ); - wrap_timeout( - async { - debug_err(wormhole.close().await, "close Wormhole"); - }, - cancel, - ) - .await; - Err(error) - }, - /* Got transit error: try receive peer error for better error message */ - Ok((Err(mut error @ TransferError::Transit(_)), cancel)) => { - log::debug!( - "Transfer encountered an error ({}), doing cleanup logic", - error - ); - wrap_timeout(async { - /* If transit failed, ask for a proper error and potentially use that instead */ - // TODO this should be replaced with some try_receive that only polls already available messages, - // and we should not only look for the next one but all have been received - // and we 
should not interrupt a receive operation without making sure it leaves the connection - // in a consistent state, otherwise the shutdown may cause protocol errors - if let Ok(Ok(Ok(PeerMessage::Error(e)))) = async_std::future::timeout(SHUTDOWN_TIME / 3, wormhole.receive_json()).await { - error = TransferError::PeerError(e); - } else { - log::debug!("Failed to retrieve more specific error message from peer. Maybe it crashed?"); - } - debug_err(wormhole.close().await, "close Wormhole"); - }, cancel).await; - Err(error) - }, - /* Other error: try to notify peer */ - Ok((Err(error), cancel)) => { - log::debug!( - "Transfer encountered an error ({}), doing cleanup logic", - error - ); - wrap_timeout( - async { - debug_err( - wormhole - .send_json(&PeerMessage::Error(format!("{}", error))) - .await, - "notify peer about the error", - ); - debug_err(wormhole.close().await, "close Wormhole"); - }, - cancel, - ) - .await; - Err(error) - }, - /* Cancelled: try to notify peer */ - Err(cancelled) => { - log::debug!("Transfer got cancelled, doing cleanup logic"); - /* Replace cancel with ever-pending future, as we have already been cancelled */ - wrap_timeout( - async { - debug_err( - wormhole - .send_json(&PeerMessage::Error(format!("{}", cancelled))) - .await, - "notify peer about our cancellation", - ); - debug_err(wormhole.close().await, "close Wormhole"); - }, - futures::future::pending(), - ) - .await; - Ok(()) - }, + #[test] + fn test_offer_directory() { + let d1 = PeerMessage::offer_directory_v1("somedirectory", "zipped", 45, 1234, 10); + assert_eq!( + serde_json::json!(d1).to_string(), + "{\"offer\":{\"directory\":{\"dirname\":\"somedirectory\",\"mode\":\"zipped\",\"numbytes\":1234,\"numfiles\":10,\"zipsize\":45}}}" + ); } -} -#[cfg(test)] -mod test { - use super::*; + #[test] + fn test_message_ack() { + let m1 = PeerMessage::message_ack_v1("ok"); + assert_eq!( + serde_json::json!(m1).to_string(), + "{\"answer\":{\"message_ack\":\"ok\"}}" + ); + } #[test] - fn test_transit_ack() { - let f1 = TransitAck::new("ok", "deadbeaf"); - assert_eq!(f1.serialize(), "{\"ack\":\"ok\",\"sha256\":\"deadbeaf\"}"); + fn test_file_ack() { + let f1 = PeerMessage::file_ack_v1("ok"); + assert_eq!( + serde_json::json!(f1).to_string(), + "{\"answer\":{\"file_ack\":\"ok\"}}" + ); } } diff --git a/src/transfer/cancel.rs b/src/transfer/cancel.rs new file mode 100644 index 00000000..bb99ddbd --- /dev/null +++ b/src/transfer/cancel.rs @@ -0,0 +1,293 @@ +/// Various helpers to deal with closing connections and cancellation +use super::*; +use crate::util; +use futures::Future; + +/// A weird mixture of [`futures::future::Abortable`], [`async_std::sync::Condvar`] and [`futures::future::Select`] tailored to our Ctrl+C handling. +/// +/// At it's core, it is an `Abortable` but instead of having an `AbortHandle`, we use a future that resolves as trigger. +/// Under the hood, it is implementing the same functionality as a `select`, but mapping one of the outcomes to an error type. 
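A sketch of how `cancellable` (defined next) is meant to be composed with a cancellation signal such as a Ctrl+C future; illustration only, not part of the patch, written as if inside this module:

async fn with_cancel<T>(
    work: impl futures::Future<Output = T> + Unpin,
    ctrl_c: impl futures::Future<Output = ()>,
) -> Option<T> {
    match cancellable(work, ctrl_c).await {
        Ok(value) => Some(value),
        // The Cancelled marker maps "the cancel future fired first" to an error-like value.
        Err(Cancelled) => None,
    }
}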
+pub async fn cancellable( + future: impl Future + Unpin, + cancel: impl Future, +) -> Result { + use futures::future::Either; + futures::pin_mut!(cancel); + match futures::future::select(cancel, future).await { + Either::Left(((), _)) => Err(Cancelled), + Either::Right((val, _)) => Ok(val), + } +} + +/** Like `cancellable`, but you'll get back the cancellation future in case the code terminates for future use */ +pub async fn cancellable_2 + Unpin>( + future: impl Future + Unpin, + cancel: C, +) -> Result<(T, C), Cancelled> { + use futures::future::Either; + match futures::future::select(cancel, future).await { + Either::Left(((), _)) => Err(Cancelled), + Either::Right((val, cancel)) => Ok((val, cancel)), + } +} + +/// Indicator that the [`Cancellable`] task was cancelled. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct Cancelled; + +impl std::fmt::Display for Cancelled { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Task has been cancelled") + } +} + +/// Maximum duration that we are willing to wait for cleanup tasks to finish +const SHUTDOWN_TIME: std::time::Duration = std::time::Duration::from_secs(5); + +// TODO make function once possible (Rust language limitations etc.) +macro_rules! with_cancel_wormhole { + ($wormhole:ident, run = $run:expr, $cancel:expr, ret_cancel = $ret_cancel:expr $(,)?) => {{ + let run = Box::pin($run); + let result = cancel::cancellable_2(run, $cancel).await; + let Some((transit, wormhole, cancel)) = cancel::handle_run_result_noclose($wormhole, result).await? else { return Ok($ret_cancel); }; + (transit, wormhole, cancel) + }}; +} + +// Make macro public +pub(super) use with_cancel_wormhole; + +// Rustfmt has a bug where it will indent a few lines again and again and again and again and again anda +#[rustfmt::skip] +macro_rules! with_cancel_transit { + ($transit:ident, run = $run:expr, $cancel:expr, $make_error_message:expr, $parse_message:expr, ret_cancel = $ret_cancel:expr $(,)?) => {{ + let run = Box::pin($run); + let result = cancel::cancellable_2(run, $cancel).await; + let Some((value, transit)) = cancel::handle_run_result_transit( + $transit, + result, + $make_error_message, + $parse_message, + ).await? else { return Ok($ret_cancel); }; + (value, transit) + }}; +} + +// Make macro public +pub(super) use with_cancel_transit; + +/// Run a future with timeout and cancellation, ignore errors +async fn wrap_timeout(run: impl Future, cancel: impl Future) { + let run = util::timeout(SHUTDOWN_TIME, run); + futures::pin_mut!(run); + match cancellable(run, cancel).await { + Ok(Ok(())) => {}, + Ok(Err(_timeout)) => log::debug!("Post-transfer timed out"), + Err(_cancelled) => log::debug!("Post-transfer got cancelled by user"), + }; +} + +/// Ignore an error but at least debug print it +fn debug_err(result: Result<(), impl std::fmt::Display>, operation: &str) { + if let Err(error) = result { + log::debug!("Failed to {} after transfer: {}", operation, error); + } +} + +/** Handle the post-{transfer, failure, cancellation} logic, then close the Wormhole */ +pub async fn handle_run_result( + wormhole: Wormhole, + result: Result<(Result<(), TransferError>, impl Future), Cancelled>, +) -> Result<(), TransferError> { + match handle_run_result_noclose(wormhole, result).await { + Ok(Some(((), mut wormhole, cancel))) => { + /* Happy case: everything went okay. 
Now close the wormhole */ + log::debug!("Transfer done, doing cleanup logic"); + wrap_timeout( + async { + debug_err(wormhole.close().await, "close Wormhole"); + }, + cancel, + ) + .await; + Ok(()) + }, + Ok(None) => Ok(()), + Err(e) => Err(e), + } +} + +/** Handle the post-{transfer, failure, cancellation} logic */ +pub async fn handle_run_result_noclose<T, C: Future<Output = ()>>( + mut wormhole: Wormhole, + result: Result<(Result<T, TransferError>, C), Cancelled>, +) -> Result<Option<(T, Wormhole, C)>, TransferError> { + match result { + /* Happy case: everything went okay */ + Ok((Ok(val), cancel)) => Ok(Some((val, wormhole, cancel))), + /* Got peer error: stop everything immediately */ + Ok((Err(error @ TransferError::PeerError(_)), cancel)) => { + log::debug!( + "Transfer encountered an error ({}), doing cleanup logic", + error + ); + wrap_timeout( + async { + debug_err(wormhole.close().await, "close Wormhole"); + }, + cancel, + ) + .await; + Err(error) + }, + /* Got transit error: try to receive peer error for better error message */ + Ok((Err(mut error @ TransferError::Transit(_)), cancel)) => { + log::debug!( + "Transfer encountered an error ({}), doing cleanup logic", + error + ); + wrap_timeout(async { + /* If transit failed, ask for a proper error and potentially use that instead */ + // TODO this should be replaced with some try_receive that only polls already available messages, + // and we should not only look for the next one but all have been received + // and we should not interrupt a receive operation without making sure it leaves the connection + // in a consistent state, otherwise the shutdown may cause protocol errors + if let Ok(Ok(Ok(PeerMessage::Error(e)))) = util::timeout(SHUTDOWN_TIME / 3, wormhole.receive_json()).await { + error = TransferError::PeerError(e); + } else { + log::debug!("Failed to retrieve more specific error message from peer. 
Maybe it crashed?"); + } + debug_err(wormhole.close().await, "close Wormhole"); + }, cancel).await; + Err(error) + }, + /* Other error: try to notify peer */ + Ok((Err(error), cancel)) => { + log::debug!( + "Transfer encountered an error ({}), doing cleanup logic", + error + ); + wrap_timeout( + async { + debug_err( + wormhole + .send_json(&PeerMessage::Error(format!("{}", error))) + .await, + "notify peer about the error", + ); + debug_err(wormhole.close().await, "close Wormhole"); + }, + cancel, + ) + .await; + Err(error) + }, + /* Cancelled: try to notify peer */ + Err(cancelled) => { + log::debug!("Transfer got cancelled, doing cleanup logic"); + /* Replace cancel with ever-pending future, as we have already been cancelled */ + wrap_timeout( + async { + debug_err( + wormhole + .send_json(&PeerMessage::Error(format!("{}", cancelled))) + .await, + "notify peer about our cancellation", + ); + debug_err(wormhole.close().await, "close Wormhole"); + }, + futures::future::pending(), + ) + .await; + Ok(None) + }, + } +} + +/** + * Handle the post-{transfer, failure, cancellation} logic where the error signaling is done over the transit channel + */ +pub async fn handle_run_result_transit( + mut transit: transit::Transit, + result: Result<(Result, impl Future), Cancelled>, + make_error_message: impl FnOnce(&(dyn std::string::ToString + Sync)) -> Vec, + parse_message: impl Fn(&[u8]) -> Result, TransferError>, +) -> Result, TransferError> { + match result { + /* Happy case: everything went okay */ + Ok((Ok(val), _cancel)) => Ok(Some((val, transit))), + /* Got peer error: stop everything immediately */ + Ok((Err(error @ TransferError::PeerError(_)), _cancel)) => { + log::debug!( + "Transfer encountered an error ({}), doing cleanup logic", + error + ); + Err(error) + }, + /* Got transit error: try to receive peer error for better error message */ + Ok((Err(mut error @ TransferError::Transit(_)), cancel)) => { + log::debug!( + "Transfer encountered an error ({}), doing cleanup logic", + error + ); + wrap_timeout( + async { + /* Receive one peer message to see if they sent some error prior to closing + * (Note that this will only happen if we noticed the closed connection while trying to send, + * otherwise receiving will already yield the error message). + */ + loop { + let Ok(msg) = transit.receive_record().await else { + break; + }; + match parse_message(&msg) { + Ok(None) => continue, + Ok(Some(err)) => { + error = TransferError::PeerError(err); + break; + }, + Err(_) => break, + } + } + }, + cancel, + ) + .await; + Err(error) + }, + /* Other error: try to notify peer */ + Ok((Err(error), cancel)) => { + log::debug!( + "Transfer encountered an error ({}), doing cleanup logic", + error + ); + wrap_timeout( + async { + debug_err( + transit.send_record(&make_error_message(&error)).await, + "notify peer about the error", + ); + }, + cancel, + ) + .await; + Err(error) + }, + /* Cancelled: try to notify peer */ + Err(cancelled) => { + log::debug!("Transfer got cancelled, doing cleanup logic"); + /* Replace cancel with ever-pending future, as we have already been cancelled */ + wrap_timeout( + async { + debug_err( + transit.send_record(&make_error_message(&cancelled)).await, + "notify peer about our cancellation", + ); + }, + futures::future::pending(), + ) + .await; + Ok(None) + }, + } +} diff --git a/src/transfer/messages.rs b/src/transfer/messages.rs deleted file mode 100644 index 33f79ecd..00000000 --- a/src/transfer/messages.rs +++ /dev/null @@ -1,252 +0,0 @@ -//! 
Over-the-wire messages for the file transfer (including transit) -//! -//! The transit protocol does not specify how to deliver the information to -//! the other side, so it is up to the file transfer to do that. hfoo - -use crate::transit::{self, Abilities as TransitAbilities}; -use serde_derive::{Deserialize, Serialize}; -use std::{collections::HashMap, path::PathBuf}; - -/** - * The type of message exchanged over the wormhole for this protocol - */ -#[derive(Deserialize, Serialize, Debug)] -#[serde(rename_all = "kebab-case")] -#[non_exhaustive] -pub enum PeerMessage { - Offer(Offer), - OfferV2(OfferV2), - Answer(Answer), - AnswerV2(AnswerV2), - /** Tell the other side you got an error */ - Error(String), - /** Used to set up a transit channel */ - Transit(TransitV1), - TransitV2(TransitV2), - #[serde(other)] - Unknown, -} - -impl PeerMessage { - pub fn offer_message(msg: impl Into) -> Self { - PeerMessage::Offer(Offer::Message(msg.into())) - } - - pub fn offer_file(name: impl Into, size: u64) -> Self { - PeerMessage::Offer(Offer::File { - filename: name.into(), - filesize: size, - }) - } - - #[allow(dead_code)] - pub fn offer_directory( - name: impl Into, - mode: impl Into, - compressed_size: u64, - numbytes: u64, - numfiles: u64, - ) -> Self { - PeerMessage::Offer(Offer::Directory { - dirname: name.into(), - mode: mode.into(), - zipsize: compressed_size, - numbytes, - numfiles, - }) - } - - #[allow(dead_code)] - pub fn message_ack(msg: impl Into) -> Self { - PeerMessage::Answer(Answer::MessageAck(msg.into())) - } - - pub fn file_ack(msg: impl Into) -> Self { - PeerMessage::Answer(Answer::FileAck(msg.into())) - } - - pub fn error_message(msg: impl Into) -> Self { - PeerMessage::Error(msg.into()) - } - - pub fn transit(abilities: TransitAbilities, hints: transit::Hints) -> Self { - PeerMessage::Transit(TransitV1 { - abilities_v1: abilities, - hints_v1: hints, - }) - } - - #[allow(dead_code)] - pub fn transit_v2(hints: transit::Hints) -> Self { - PeerMessage::TransitV2(TransitV2 { hints }) - } - - #[allow(dead_code)] - pub fn ser_json(&self) -> Vec { - serde_json::to_vec(self).unwrap() - } - - #[allow(dead_code)] - pub fn ser_msgpack(&self) -> Vec { - let mut writer = Vec::with_capacity(128); - let mut ser = rmp_serde::encode::Serializer::new(&mut writer) - .with_struct_map() - .with_human_readable(); - serde::Serialize::serialize(self, &mut ser).unwrap(); - writer - } - - #[allow(dead_code)] - pub fn de_msgpack(data: &[u8]) -> Result { - rmp_serde::from_read(&mut &*data) - } -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "kebab-case")] -pub enum Offer { - Message(String), - File { - filename: PathBuf, - filesize: u64, - }, - Directory { - dirname: PathBuf, - mode: String, - zipsize: u64, - numbytes: u64, - numfiles: u64, - }, - #[serde(other)] - Unknown, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "kebab-case")] -pub struct OfferV2 { - transfer_name: Option, - files: Vec, - format: String, // TODO use custom enum? 
-} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "kebab-case")] -pub struct OfferV2Entry { - path: String, - size: u64, - mtime: u64, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "snake_case")] -pub enum Answer { - MessageAck(String), - FileAck(String), -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -#[serde(rename_all = "kebab-case")] -pub struct AnswerV2 { - files: HashMap, -} - -/** - * A set of hints for both sides to find each other - */ -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -pub struct TransitV1 { - pub abilities_v1: TransitAbilities, - pub hints_v1: transit::Hints, -} - -/** - * A set of hints for both sides to find each other - */ -#[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "kebab-case")] -pub struct TransitV2 { - pub hints: transit::Hints, -} - -#[cfg(test)] -mod test { - use super::*; - use transit::{Abilities, DirectHint, RelayHint}; - - #[test] - fn test_transit() { - let abilities = Abilities::ALL_ABILITIES; - let hints = transit::Hints::new( - [DirectHint::new("192.168.1.8", 46295)], - [RelayHint::new( - None, - [DirectHint::new("magic-wormhole-transit.debian.net", 4001)], - [], - )], - ); - assert_eq!( - serde_json::json!(crate::transfer::PeerMessage::transit(abilities, hints)), - serde_json::json!({ - "transit": { - "abilities-v1": [{"type":"direct-tcp-v1"},{"type":"relay-v1"}], - "hints-v1": [ - {"hostname":"192.168.1.8","port":46295,"type":"direct-tcp-v1"}, - { - "type": "relay-v1", - "hints": [ - {"type": "direct-tcp-v1", "hostname": "magic-wormhole-transit.debian.net", "port": 4001} - ], - "name": null - } - ], - } - }) - ); - } - - #[test] - fn test_message() { - let m1 = PeerMessage::offer_message("hello from rust"); - assert_eq!( - serde_json::json!(m1).to_string(), - "{\"offer\":{\"message\":\"hello from rust\"}}" - ); - } - - #[test] - fn test_offer_file() { - let f1 = PeerMessage::offer_file("somefile.txt", 34556); - assert_eq!( - serde_json::json!(f1).to_string(), - "{\"offer\":{\"file\":{\"filename\":\"somefile.txt\",\"filesize\":34556}}}" - ); - } - - #[test] - fn test_offer_directory() { - let d1 = PeerMessage::offer_directory("somedirectory", "zipped", 45, 1234, 10); - assert_eq!( - serde_json::json!(d1).to_string(), - "{\"offer\":{\"directory\":{\"dirname\":\"somedirectory\",\"mode\":\"zipped\",\"numbytes\":1234,\"numfiles\":10,\"zipsize\":45}}}" - ); - } - - #[test] - fn test_message_ack() { - let m1 = PeerMessage::message_ack("ok"); - assert_eq!( - serde_json::json!(m1).to_string(), - "{\"answer\":{\"message_ack\":\"ok\"}}" - ); - } - - #[test] - fn test_file_ack() { - let f1 = PeerMessage::file_ack("ok"); - assert_eq!( - serde_json::json!(f1).to_string(), - "{\"answer\":{\"file_ack\":\"ok\"}}" - ); - } -} diff --git a/src/transfer/v1.rs b/src/transfer/v1.rs index 1049c404..e7035811 100644 --- a/src/transfer/v1.rs +++ b/src/transfer/v1.rs @@ -1,15 +1,141 @@ -use futures::{AsyncReadExt, AsyncWriteExt}; -use log::*; +use futures::{ + io::{AsyncReadExt, AsyncWriteExt}, + StreamExt, TryFutureExt, +}; use sha2::{digest::FixedOutput, Digest, Sha256}; -use std::path::PathBuf; use super::*; -pub async fn send_file( +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "kebab-case")] +pub enum OfferMessage { + Message(String), + File { + filename: String, + filesize: u64, + }, + Directory { + dirname: String, + mode: String, + zipsize: u64, + numbytes: u64, + numfiles: u64, + }, + #[serde(other)] + 
Unknown, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum AnswerMessage { + MessageAck(String), + FileAck(String), +} + +/** + * A set of hints for both sides to find each other + */ +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +pub struct TransitV1 { + pub abilities_v1: TransitAbilities, + pub hints_v1: transit::Hints, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[serde(rename_all = "kebab-case")] +struct TransitAck { + pub ack: String, + pub sha256: String, +} + +impl TransitAck { + pub fn new(msg: impl Into, sha256: impl Into) -> Self { + TransitAck { + ack: msg.into(), + sha256: sha256.into(), + } + } + + #[cfg(test)] + pub fn serialize(&self) -> String { + json!(self).to_string() + } + + pub fn serialize_vec(&self) -> Vec { + serde_json::to_vec(self).unwrap() + } +} + +pub async fn send( + wormhole: Wormhole, + relay_hints: Vec, + transit_abilities: transit::Abilities, + offer: OfferSend, + progress_handler: impl FnMut(u64, u64) + 'static, + transit_handler: impl FnOnce(transit::TransitInfo), + _peer_version: AppVersion, + cancel: impl Future, +) -> Result<(), TransferError> { + if offer.is_multiple() { + let folder = OfferSendEntry::Directory { + content: offer.content, + }; + send_folder( + wormhole, + relay_hints, + "".into(), + folder, + transit_abilities, + transit_handler, + progress_handler, + cancel, + ) + .await + } else if offer.is_directory() { + let (folder_name, folder) = offer.content.into_iter().next().unwrap(); + send_folder( + wormhole, + relay_hints, + folder_name, + folder, + transit_abilities, + transit_handler, + progress_handler, + cancel, + ) + .await + } else { + let (file_name, file) = offer.content.into_iter().next().unwrap(); + let (mut file, file_size) = match file { + OfferSendEntry::RegularFile { content, size } => { + /* This must be split into two statements to appease the borrow checker (unfortunate side effect of borrow-through) */ + let content = content(); + let content = content.await?; + (content, size) + }, + _ => unreachable!(), + }; + send_file( + wormhole, + relay_hints, + &mut file, + file_name, + file_size, + transit_abilities, + transit_handler, + progress_handler, + cancel, + ) + .await + } +} + +pub async fn send_file( mut wormhole: Wormhole, relay_hints: Vec, file: &mut F, - file_name: N, + file_name: impl Into, file_size: u64, transit_abilities: transit::Abilities, transit_handler: G, @@ -17,9 +143,8 @@ pub async fn send_file( cancel: impl Future, ) -> Result<(), TransferError> where - F: AsyncRead + Unpin, - N: Into, - G: FnOnce(transit::TransitInfo, std::net::SocketAddr), + F: AsyncRead + Unpin + Send, + G: FnOnce(transit::TransitInfo), H: FnMut(u64, u64) + 'static, { let run = Box::pin(async { @@ -28,7 +153,7 @@ where // We want to do some transit debug!("Sending transit message '{:?}", connector.our_hints()); wormhole - .send_json(&PeerMessage::transit( + .send_json(&PeerMessage::transit_v1( *connector.our_abilities(), (**connector.our_hints()).clone(), )) @@ -37,19 +162,16 @@ where // Send file offer message. debug!("Sending file offer"); wormhole - .send_json(&PeerMessage::offer_file(file_name, file_size)) + .send_json(&PeerMessage::offer_file_v1(file_name, file_size)) .await?; // Wait for their transit response let (their_abilities, their_hints): (transit::Abilities, transit::Hints) = - match wormhole.receive_json().await?? { + match wormhole.receive_json::().await??.check_err()? 
{ PeerMessage::Transit(transit) => { debug!("Received transit message: {:?}", transit); (transit.abilities_v1, transit.hints_v1) }, - PeerMessage::Error(err) => { - bail!(TransferError::PeerError(err)); - }, other => { bail!(TransferError::unexpected_message("transit", other)) }, @@ -57,16 +179,13 @@ where { // Wait for file_ack - let fileack_msg = wormhole.receive_json().await??; + let fileack_msg = wormhole.receive_json::().await??; debug!("Received file ack message: {:?}", fileack_msg); - match fileack_msg { - PeerMessage::Answer(Answer::FileAck(msg)) => { + match fileack_msg.check_err()? { + PeerMessage::Answer(AnswerMessage::FileAck(msg)) => { ensure!(msg == "ok", TransferError::AckError); }, - PeerMessage::Error(err) => { - bail!(TransferError::PeerError(err)); - }, _ => { bail!(TransferError::unexpected_message( "answer/file_ack", @@ -76,18 +195,21 @@ where } } - let (mut transit, info, addr) = connector + let (mut transit, info) = connector .leader_connect( wormhole.key().derive_transit_key(wormhole.appid()), their_abilities, Arc::new(their_hints), ) .await?; - transit_handler(info, addr); + transit_handler(info); debug!("Beginning file transfer"); // 11. send the file as encrypted records. + let file = futures::stream::once(futures::future::ready(std::io::Result::Ok( + Box::new(file) as Box, + ))); let checksum = v1::send_records(&mut transit, file, file_size, progress_handler).await?; // 13. wait for the transit ack with sha256 sum from the peer. @@ -104,222 +226,157 @@ where }); futures::pin_mut!(cancel); - let result = crate::util::cancellable_2(run, cancel).await; - super::handle_run_result(wormhole, result).await + let result = cancel::cancellable_2(run, cancel).await; + cancel::handle_run_result(wormhole, result).await } -pub async fn send_folder( +pub async fn send_folder( mut wormhole: Wormhole, relay_hints: Vec, - folder_path: N, - folder_name: M, + mut folder_name: String, + folder: OfferSendEntry, transit_abilities: transit::Abilities, - transit_handler: G, - progress_handler: H, + transit_handler: impl FnOnce(transit::TransitInfo), + progress_handler: impl FnMut(u64, u64) + 'static, cancel: impl Future, -) -> Result<(), TransferError> -where - N: Into, - M: Into, - G: FnOnce(transit::TransitInfo, std::net::SocketAddr), - H: FnMut(u64, u64) + 'static, -{ +) -> Result<(), TransferError> { let run = Box::pin(async { let connector = transit::init(transit_abilities, None, relay_hints).await?; - let folder_path = folder_path.into(); - - if !folder_path.is_dir() { - panic!( - "You should only call this method with directory paths, but '{}' is not", - folder_path.display() - ); - } // We want to do some transit debug!("Sending transit message '{:?}", connector.our_hints()); wormhole - .send_json(&PeerMessage::transit( + .send_json(&PeerMessage::transit_v1( *connector.our_abilities(), (**connector.our_hints()).clone(), )) .await?; - // use sha2::{digest::FixedOutput, Digest, Sha256}; + /* We need to know the length of what we are going to send in advance. So we already build + * all the headers of our file now but without the contents. 
We know that a file is + * header + contents + padding + */ + log::debug!("Estimating the file size"); - /* Helper struct stolen from https://docs.rs/count-write/0.1.0 */ - struct CountWrite { - inner: W, - count: u64, + // TODO try again but without pinning + use futures::{ + future::{ready, BoxFuture}, + io::Cursor, + }; + use std::io::Result as IoResult; + + /* Type tetris :) */ + fn wrap( + buffer: impl AsRef<[u8]> + Unpin + Send + 'static, + ) -> BoxFuture<'static, IoResult>> { + Box::pin(ready(IoResult::Ok( + Box::new(Cursor::new(buffer)) as Box + ))) as _ } - impl std::io::Write for CountWrite { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - let written = self.inner.write(buf)?; - self.count += written as u64; - Ok(written) - } - - fn flush(&mut self) -> std::io::Result<()> { - self.inner.flush() + /* Walk our offer recursively, concatenate all our readers into a stream that will build the tar file */ + fn create_offer( + mut total_content: Vec< + BoxFuture<'static, IoResult>>, + >, + total_size: &mut u64, + offer: OfferSendEntry, + path: &mut Vec, + ) -> IoResult>>>> + { + match offer { + OfferSendEntry::Directory { content } => { + log::debug!("Adding directory {path:?}"); + let header = tar_helper::create_header_directory(path)?; + *total_size += header.len() as u64; + total_content.push(wrap(header)); + + for (name, file) in content { + path.push(name); + total_content = create_offer(total_content, total_size, file, path)?; + path.pop(); + } + }, + OfferSendEntry::RegularFile { size, content } => { + log::debug!("Adding file {path:?}; {size} bytes"); + let header = tar_helper::create_header_file(&path, size)?; + let padding = tar_helper::padding(size); + *total_size += header.len() as u64; + *total_size += padding.len() as u64; + *total_size += size; + + total_content.push(wrap(header)); + let content = content().map_ok( + /* Re-box because we can't upcast trait objects */ + |read| Box::new(read) as Box, + ); + total_content.push(Box::pin(content) as _); + total_content.push(wrap(padding)); + }, + // OfferSendEntry::Symlink { .. } => todo!(), } + Ok(total_content) } - /* We need to know the length of what we are going to send in advance. So we build the - * tar file once, stream it into the void, and the second time we stream it over the - * wire. Also hashing for future reference. - */ - log::info!("Calculating the size of '{}'", folder_path.display()); - let folder_path2 = folder_path.clone(); - let (length, sha256sum_initial) = { - let mut hasher = Sha256::new(); - let mut counter = CountWrite { - inner: &mut hasher, - count: 0, - }; - let mut builder = async_tar::Builder::new(futures::io::AllowStdIo::new(&mut counter)); - - builder.mode(async_tar::HeaderMode::Deterministic); - builder.follow_symlinks(false); - /* A hasher should never fail writing */ - builder.append_dir_all("", folder_path2).await.unwrap(); - builder.finish().await.unwrap(); - - std::mem::drop(builder); - let count = counter.count; - std::mem::drop(counter); - (count, hasher.finalize_fixed()) - }; + let mut total_size = 0; + let mut content = create_offer( + Vec::new(), + &mut total_size, + folder, + &mut vec![folder_name.clone()], + )?; + + /* Finish tar file */ + total_size += 1024; + content.push(wrap([0; 1024])); + + let content = futures::stream::iter(content).then(|content| async { content.await }); + + /* Convert to stream */ // Send file offer message. 
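// Illustrative sketch (not part of the patch): sanity-checking the size
// estimation above. For a path short enough to fit into a plain GNU header,
// each regular-file entry is one 512-byte header block, the file contents,
// and zero padding up to the next 512-byte boundary; the archive ends with
// two 512-byte zero blocks (the `total_size += 1024` above). Longer paths add
// an extra long-name entry, which `create_header_file` accounts for by
// returning a larger header buffer. The helper name below is made up for the
// example only.
fn estimated_tar_size(file_sizes: &[u64]) -> u64 {
    file_sizes
        .iter()
        .map(|&size| 512 + size + (512 - size % 512) % 512)
        .sum::<u64>()
        + 1024
}

#[cfg(test)]
#[test]
fn estimated_tar_size_example() {
    // one 100-byte file: 512 (header) + 100 + 412 (padding) + 1024 (end) = 2048
    assert_eq!(estimated_tar_size(&[100]), 2048);
}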
- debug!("Sending file offer"); + log::debug!("Sending file offer ({total_size} bytes)"); + folder_name.push_str(".tar"); wormhole - .send_json(&PeerMessage::offer_file(folder_name, length)) + .send_json(&PeerMessage::offer_file_v1(folder_name, total_size)) .await?; // Wait for their transit response let (their_abilities, their_hints): (transit::Abilities, transit::Hints) = - match wormhole.receive_json().await?? { + match wormhole.receive_json::().await??.check_err()? { PeerMessage::Transit(transit) => { debug!("received transit message: {:?}", transit); (transit.abilities_v1, transit.hints_v1) }, - PeerMessage::Error(err) => { - bail!(TransferError::PeerError(err)); - }, other => { bail!(TransferError::unexpected_message("transit", other)); }, }; // Wait for file_ack - match wormhole.receive_json().await?? { - PeerMessage::Answer(Answer::FileAck(msg)) => { + match wormhole.receive_json::().await??.check_err()? { + PeerMessage::Answer(AnswerMessage::FileAck(msg)) => { ensure!(msg == "ok", TransferError::AckError); }, - PeerMessage::Error(err) => { - bail!(TransferError::PeerError(err)); - }, other => { bail!(TransferError::unexpected_message("answer/file_ack", other)); }, } - let (mut transit, info, addr) = connector + let (mut transit, info) = connector .leader_connect( wormhole.key().derive_transit_key(wormhole.appid()), their_abilities, Arc::new(their_hints), ) .await?; - transit_handler(info, addr); + transit_handler(info); debug!("Beginning file transfer"); - /* Inspired by https://github.com/RustCrypto/traits/pull/1159/files */ - pub struct HashWriter { - writer: W, - hasher: D, - } - - use std::{ - pin::Pin, - task::{Context, Poll}, - }; - impl - futures::io::AsyncWrite for HashWriter - { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - // log::debug!("Poll write, {}", buf.len()); - match Pin::new(&mut self.writer).poll_write(cx, buf) { - Poll::Ready(Ok(n)) => { - self.hasher.update(&buf[..n]); - Poll::Ready(Ok(n)) - }, - res => res, - } - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - // log::debug!("Poll flush"); - Pin::new(&mut self.writer).poll_flush(cx) - } - - fn poll_close( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - // log::debug!("Poll close"); - Pin::new(&mut self.writer).poll_close(cx) - } - } - // 11. send the file as encrypted records. - let (mut reader, writer) = futures_ringbuf::RingBuffer::new(4096).split(); - - let file_sender = async_std::task::spawn(async move { - let mut hash_writer = HashWriter { - writer, - hasher: Sha256::new(), - }; - let mut builder = async_tar::Builder::new(&mut hash_writer); - - builder.mode(async_tar::HeaderMode::Deterministic); - builder.follow_symlinks(false); - builder.append_dir_all("", folder_path).await?; - builder.finish().await?; - std::mem::drop(builder); - - hash_writer.flush().await?; - hash_writer.close().await?; - let hasher = hash_writer.hasher; - - std::io::Result::Ok(hasher.finalize_fixed()) - }); - - let (checksum, sha256sum) = - match v1::send_records(&mut transit, &mut reader, length, progress_handler).await { - Ok(checksum) => (checksum, file_sender.await?), - Err(err) => { - log::debug!("Some more error {err}"); - if let Some(Err(err)) = file_sender.cancel().await { - log::warn!("Error in background task: {err}"); - } - return Err(err); - }, - }; - - /* Check if the hash sum still matches what we advertized. 
Otherwise, tell the other side and bail out */ - ensure!( - sha256sum == sha256sum_initial, - TransferError::FilesystemSkew - ); + let checksum = + v1::send_records(&mut transit, content, total_size, progress_handler).await?; // 13. wait for the transit ack with sha256 sum from the peer. debug!("sent file. Waiting for ack"); @@ -335,21 +392,184 @@ where }); futures::pin_mut!(cancel); - let result = crate::util::cancellable_2(run, cancel).await; - super::handle_run_result(wormhole, result).await + let result = cancel::cancellable_2(run, cancel).await; + cancel::handle_run_result(wormhole, result).await +} + +/** + * Wait for a file offer from the other side + * + * This method waits for an offer message and builds up a [`ReceiveRequest`](ReceiveRequest). + * It will also start building a TCP connection to the other side using the transit protocol. + * + * Returns `None` if the task got cancelled. + */ +pub async fn request( + mut wormhole: Wormhole, + relay_hints: Vec, + transit_abilities: transit::Abilities, + cancel: impl Future, +) -> Result, TransferError> { + // Error handling + let run = Box::pin(async { + let connector = transit::init(transit_abilities, None, relay_hints).await?; + + // send the transit message + debug!("Sending transit message '{:?}", connector.our_hints()); + wormhole + .send_json(&PeerMessage::transit_v1( + *connector.our_abilities(), + (**connector.our_hints()).clone(), + )) + .await?; + + // receive transit message + let (their_abilities, their_hints): (transit::Abilities, transit::Hints) = + match wormhole.receive_json::().await??.check_err()? { + PeerMessage::Transit(transit) => { + debug!("received transit message: {:?}", transit); + (transit.abilities_v1, transit.hints_v1) + }, + other => { + bail!(TransferError::unexpected_message("transit", other)); + }, + }; + + // 3. receive file offer message from peer + let (filename, filesize) = + match wormhole.receive_json::().await??.check_err()? { + PeerMessage::Offer(offer_type) => match offer_type { + v1::OfferMessage::File { filename, filesize } => (filename, filesize), + v1::OfferMessage::Directory { + mut dirname, + zipsize, + .. + } => { + dirname.push_str(".zip"); + (dirname, zipsize) + }, + _ => bail!(TransferError::UnsupportedOffer), + }, + other => { + bail!(TransferError::unexpected_message("offer", other)); + }, + }; + + Ok((filename, filesize, connector, their_abilities, their_hints)) + }); + + futures::pin_mut!(cancel); + let result = cancel::cancellable_2(run, cancel).await; + cancel::handle_run_result_noclose(wormhole, result) + .await + .map(|inner: Option<_>| { + inner.map( + |((filename, filesize, connector, their_abilities, their_hints), wormhole, _)| { + ReceiveRequest { + wormhole, + filename, + filesize, + connector, + their_abilities, + their_hints: Arc::new(their_hints), + } + }, + ) + }) +} + +/** + * A pending files send offer from the other side + * + * You *should* consume this object, either by calling [`accept`](ReceiveRequest::accept) or [`reject`](ReceiveRequest::reject). + */ +#[must_use] +pub struct ReceiveRequest { + wormhole: Wormhole, + connector: TransitConnector, + /// **Security warning:** this is untrusted and unverified input + pub filename: String, + pub filesize: u64, + their_abilities: transit::Abilities, + their_hints: Arc, +} + +impl ReceiveRequest { + /** + * Accept the file offer + * + * This will transfer the file and save it on disk. 
+ */ + pub async fn accept( + mut self, + transit_handler: G, + content_handler: &mut W, + progress_handler: F, + cancel: impl Future, + ) -> Result<(), TransferError> + where + F: FnMut(u64, u64) + 'static, + G: FnOnce(transit::TransitInfo), + W: AsyncWrite + Unpin, + { + let run = Box::pin(async { + // send file ack. + debug!("Sending ack"); + self.wormhole + .send_json(&PeerMessage::file_ack_v1("ok")) + .await?; + + let (mut transit, info) = self + .connector + .follower_connect( + self.wormhole + .key() + .derive_transit_key(self.wormhole.appid()), + self.their_abilities, + self.their_hints.clone(), + ) + .await?; + transit_handler(info); + + debug!("Beginning file transfer"); + tcp_file_receive( + &mut transit, + self.filesize, + progress_handler, + content_handler, + ) + .await?; + Ok(()) + }); + + futures::pin_mut!(cancel); + let result = cancel::cancellable_2(run, cancel).await; + cancel::handle_run_result(self.wormhole, result).await + } + + /** + * Reject the file offer + * + * This will send an error message to the other side so that it knows the transfer failed. + */ + pub async fn reject(mut self) -> Result<(), TransferError> { + self.wormhole + .send_json(&PeerMessage::error_message("transfer rejected")) + .await?; + self.wormhole.close().await?; + + Ok(()) + } } // encrypt and send the file to tcp stream and return the sha256 sum // of the file before encryption. -pub async fn send_records( +pub async fn send_records<'a>( transit: &mut Transit, - file: &mut (impl AsyncRead + Unpin), + files: impl futures::Stream>>, file_size: u64, - mut progress_handler: F, -) -> Result, TransferError> -where - F: FnMut(u64, u64) + 'static, -{ + mut progress_handler: impl FnMut(u64, u64) + 'static, +) -> Result, TransferError> { // rough plan: // 1. Open the file // 2. read a block of N bytes @@ -367,28 +587,30 @@ where // Yeah, maybe don't allocate 4kiB on the stack… let mut plaintext = Box::new([0u8; 4096]); let mut sent_size = 0; - loop { - // read a block of up to 4096 bytes - let n = file.read(&mut plaintext[..]).await?; - log::debug!("Read {n}"); - - if n == 0 { - // EOF - break; - } + futures::pin_mut!(files); + while let Some(mut file) = files.next().await.transpose()? { + loop { + // read a block of up to 4096 bytes + let n = file.read(&mut plaintext[..]).await?; + + if n == 0 { + // EOF + break; + } - // send the encrypted record - transit.send_record(&plaintext[0..n]).await?; - sent_size += n as u64; - progress_handler(sent_size, file_size); + // send the encrypted record + transit.send_record(&plaintext[0..n]).await?; + sent_size += n as u64; + progress_handler(sent_size, file_size); - // sha256 of the input - hasher.update(&plaintext[..n]); + // sha256 of the input + hasher.update(&plaintext[..n]); - /* Don't do this. The EOF check above is sufficient */ - // if n < 4096 { - // break; - // } + /* Don't do this. The EOF check above is sufficient */ + // if n < 4096 { + // break; + // } + } } transit.flush().await?; @@ -407,7 +629,7 @@ pub async fn receive_records( filesize: u64, transit: &mut Transit, mut progress_handler: F, - content_handler: &mut W, + mut content_handler: W, ) -> Result, TransferError> where F: FnMut(u64, u64) + 'static, @@ -436,6 +658,7 @@ where let remaining = remaining_size as u64; progress_handler(total - remaining, total); } + content_handler.close().await?; debug!("done"); // TODO: 5. write the buffer into a file. 
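// Illustrative sketch (not part of the patch): `send_records` now consumes a
// stream of readers rather than a single reader, so the tar headers, file
// contents and padding built earlier can be concatenated lazily. A single
// reader can be adapted with a one-element stream, mirroring what `send_file`
// does above; the helper name and the exact trait bounds on the boxed reader
// are assumptions here, since the generic parameters are elided in this
// excerpt.
fn single_reader_stream<R>(
    reader: R,
) -> impl futures::Stream<Item = std::io::Result<Box<dyn futures::io::AsyncRead + Unpin + Send>>>
where
    R: futures::io::AsyncRead + Unpin + Send + 'static,
{
    // Wrap the reader in a ready future and turn that into a one-shot stream,
    // boxing it to erase the concrete type.
    futures::stream::once(futures::future::ready(std::io::Result::Ok(
        Box::new(reader) as Box<dyn futures::io::AsyncRead + Unpin + Send>,
    )))
}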
@@ -470,3 +693,142 @@ where debug!("Transfer complete"); Ok(()) } + +/// Custom functions from the `tar` crate to access internals +mod tar_helper { + /* Imports may depend on target platform */ + #[allow(unused_imports)] + use std::{ + borrow::Cow, + io::{self, Read, Write}, + path::Path, + str, + }; + + pub fn create_header_file(path: &[String], size: u64) -> std::io::Result> { + let mut header = tar::Header::new_gnu(); + header.set_size(size); + let mut data = Vec::with_capacity(1024); + prepare_header_path(&mut data, &mut header, path.join("/").as_ref())?; + header.set_mode(0o644); + header.set_cksum(); + data.write_all(header.as_bytes())?; + Ok(data) + } + + pub fn create_header_directory(path: &[String]) -> std::io::Result> { + let mut header = tar::Header::new_gnu(); + header.set_entry_type(tar::EntryType::Directory); + let mut data = Vec::with_capacity(1024); + prepare_header_path(&mut data, &mut header, path.join("/").as_ref())?; + header.set_mode(0o755); + header.set_cksum(); + data.write_all(header.as_bytes())?; + // append(&mut data, header, data)?; + Ok(data) + } + + pub fn padding(size: u64) -> &'static [u8] { + const BLOCK: [u8; 512] = [0; 512]; + if size % 512 != 0 { + &BLOCK[size as usize % 512..] + } else { + &[] + } + } + + fn append( + mut dst: &mut dyn std::io::Write, + header: &tar::Header, + mut data: &mut dyn std::io::Read, + ) -> std::io::Result<()> { + dst.write_all(header.as_bytes())?; + let len = std::io::copy(&mut data, &mut dst)?; + dst.write_all(padding(len))?; + Ok(()) + } + + fn prepare_header(size: u64, entry_type: u8) -> tar::Header { + let mut header = tar::Header::new_gnu(); + let name = b"././@LongLink"; + header.as_gnu_mut().unwrap().name[..name.len()].clone_from_slice(&name[..]); + header.set_mode(0o644); + header.set_uid(0); + header.set_gid(0); + header.set_mtime(0); + // + 1 to be compliant with GNU tar + header.set_size(size + 1); + header.set_entry_type(tar::EntryType::new(entry_type)); + header.set_cksum(); + header + } + + fn prepare_header_path( + dst: &mut dyn std::io::Write, + header: &mut tar::Header, + path: &str, + ) -> std::io::Result<()> { + // Try to encode the path directly in the header, but if it ends up not + // working (probably because it's too long) then try to use the GNU-specific + // long name extension by emitting an entry which indicates that it's the + // filename. + if let Err(e) = header.set_path(path) { + let data = path2bytes(&path); + let max = header.as_old().name.len(); + // Since `e` isn't specific enough to let us know the path is indeed too + // long, verify it first before using the extension. + if data.len() < max { + return Err(e); + } + let header2 = prepare_header(data.len() as u64, b'L'); + // null-terminated string + let mut data2 = data.chain(io::repeat(0).take(1)); + append(dst, &header2, &mut data2)?; + + // Truncate the path to store in the header we're about to emit to + // ensure we've got something at least mentioned. Note that we use + // `str`-encoding to be compatible with Windows, but in general the + // entry in the header itself shouldn't matter too much since extraction + // doesn't look at it. 
+ let truncated = match std::str::from_utf8(&data[..max]) { + Ok(s) => s, + Err(e) => std::str::from_utf8(&data[..e.valid_up_to()]).unwrap(), + }; + header.set_path(truncated)?; + } + Ok(()) + } + + #[cfg(any(windows, target_arch = "wasm32"))] + pub fn path2bytes(p: &str) -> Cow<[u8]> { + let bytes = p.as_bytes(); + if bytes.contains(&b'\\') { + // Normalize to Unix-style path separators + let mut bytes = bytes.to_owned(); + for b in &mut bytes { + if *b == b'\\' { + *b = b'/'; + } + } + Cow::Owned(bytes) + } else { + Cow::Borrowed(bytes) + } + } + + #[cfg(unix)] + pub fn path2bytes(p: &str) -> Cow<[u8]> { + Cow::Borrowed(p.as_bytes()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_transit_ack() { + let f1 = TransitAck::new("ok", "deadbeaf"); + assert_eq!(f1.serialize(), "{\"ack\":\"ok\",\"sha256\":\"deadbeaf\"}"); + } +} diff --git a/src/transfer/v2.rs b/src/transfer/v2.rs index 1906c5ef..7ecaa5b1 100644 --- a/src/transfer/v2.rs +++ b/src/transfer/v2.rs @@ -1,115 +1,604 @@ -#![allow(dead_code)] +use futures::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; +use serde_derive::{Deserialize, Serialize}; +use sha2::{digest::FixedOutput, Sha256}; use super::*; -#[allow(unused_variables, unused_mut)] -pub async fn send_file( +/** + * A set of hints for both sides to find each other + */ +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +pub struct TransitV2 { + pub hints_v2: transit::Hints, +} + +/** + * The type of message exchanged over the transit connection, serialized with msgpack + */ +#[derive(Deserialize, Serialize, derive_more::Display, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +#[non_exhaustive] +pub enum PeerMessageV2 { + #[display(fmt = "offer")] + Offer(Offer), + #[display(fmt = "answer")] + Answer(AnswerMessage), + #[display(fmt = "file-start")] + FileStart(FileStart), + #[display(fmt = "payload")] + Payload(Payload), + #[display(fmt = "file-end")] + FileEnd(FileEnd), + #[display(fmt = "transfer-ack")] + TransferAck(TransferAck), + #[display(fmt = "error")] + Error(String), + #[display(fmt = "unknown")] + #[serde(other)] + Unknown, +} + +impl PeerMessageV2 { + pub fn ser_msgpack(&self) -> Vec { + let mut writer = Vec::with_capacity(128); + let mut ser = rmp_serde::encode::Serializer::new(&mut writer) + .with_struct_map() + .with_human_readable(); + serde::Serialize::serialize(self, &mut ser).unwrap(); + writer + } + + pub fn de_msgpack(data: &[u8]) -> Result { + rmp_serde::from_read(&mut &*data) + } + + pub fn check_err(self) -> Result { + match self { + Self::Error(err) => Err(TransferError::PeerError(err)), + other => Ok(other), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +#[serde(rename_all = "kebab-case")] +pub struct AnswerMessage { + pub(self) files: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +#[serde(rename_all = "kebab-case")] +pub(self) struct AnswerMessageInner { + pub file: Vec, + pub offset: u64, + pub sha256: Option<[u8; 32]>, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +pub struct FileStart { + pub file: Vec, + pub start_at_offset: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +pub struct Payload { + payload: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +pub struct FileEnd {} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "kebab-case")] +pub struct TransferAck {} + +/** The 
code to establish a transit connection is essentially the same on both sides. */ +async fn make_transit( + wormhole: &mut Wormhole, + is_leader: bool, + relay_hints: Vec, + transit_abilities: transit::Abilities, + peer_abilities: transit::Abilities, +) -> Result<(transit::Transit, transit::TransitInfo), TransferError> { + let connector = transit::init(transit_abilities, Some(peer_abilities), relay_hints).await?; + + /* Send our transit hints */ + wormhole + .send_json(&PeerMessage::transit_v2( + (**connector.our_hints()).clone().into(), + )) + .await?; + + /* Receive their transit hints */ + let their_hints: transit::Hints = + match wormhole.receive_json::().await??.check_err()? { + PeerMessage::TransitV2(transit) => { + debug!("received transit message: {:?}", transit); + transit.hints_v2.into() + }, + other => { + let error = TransferError::unexpected_message("transit-v2", other); + let _ = wormhole + .send_json(&PeerMessage::Error(format!("{}", error))) + .await; + bail!(error) + }, + }; + + /* Get a transit connection */ + let (transit, info) = match connector + .connect( + is_leader, + wormhole.key().derive_transit_key(wormhole.appid()), + peer_abilities, + Arc::new(their_hints), + ) + .await + { + Ok(transit) => transit, + Err(error) => { + let error = TransferError::TransitConnect(error); + let _ = wormhole + .send_json(&PeerMessage::Error(format!("{}", error))) + .await; + return Err(error); + }, + }; + + Ok((transit, info)) +} + +pub async fn send( mut wormhole: Wormhole, relay_hints: Vec, - file: &mut F, - file_path: N, - file_name: String, - progress_handler: H, + transit_abilities: transit::Abilities, + offer: OfferSend, + progress_handler: impl FnMut(u64, u64) + 'static, peer_version: AppVersion, -) -> Result<(), TransferError> -where - F: AsyncRead + Unpin, - N: Into, - H: FnMut(u64, u64) + 'static, -{ - // let file_path = file_path.into(); - // let peer_abilities = peer_version.transfer_v2.unwrap(); - // let mut actual_transit_abilities = transit::Ability::all_abilities(); - // actual_transit_abilities.retain(|a| peer_abilities.transit_abilities.contains(a)); - // let connector = transit::init(actual_transit_abilities, Some(&peer_abilities.transit_abilities), relay_hints).await?; - - // /* Send our transit hints */ - // wormhole - // .send_json( - // &PeerMessage::transit_v2( - // (**connector.our_hints()).clone().into(), - // ), - // ) - // .await?; - - // /* Receive their transit hints */ - // let their_hints: transit::Hints = - // match wormhole.receive_json().await?? 
{ - // PeerMessage::TransitV2(transit) => { - // debug!("received transit message: {:?}", transit); - // transit.hints_v2.into() - // }, - // PeerMessage::Error(err) => { - // bail!(TransferError::PeerError(err)); - // }, - // other => { - // let error = TransferError::unexpected_message("transit-v2", other); - // let _ = wormhole - // .send_json(&PeerMessage::Error(format!("{}", error))) - // .await; - // bail!(error) - // }, - // }; - - // /* Get a transit connection */ - // let mut transit = match connector - // .leader_connect( - // wormhole.key().derive_transit_key(wormhole.appid()), - // Arc::new(peer_abilities.transit_abilities), - // Arc::new(their_hints), - // ) - // .await - // { - // Ok(transit) => transit, - // Err(error) => { - // let error = TransferError::TransitConnect(error); - // let _ = wormhole - // .send_json(&PeerMessage::Error(format!("{}", error))) - // .await; - // return Err(error); - // }, - // }; - - // /* Close the Wormhole and switch to using the transit connection (msgpack instead of json) */ - // wormhole.close().await?; - - // transit.send_record(&PeerMessage::OfferV2(OfferV2 { - // transfer_name: None, - // files: vec![], - // format: "tar.zst".into(), - // }).ser_msgpack()).await?; - - // match PeerMessage::de_msgpack(&transit.receive_record().await?)? { - // PeerMessage::AnswerV2(answer) => { - // // let files = answer.files; - // }, - // PeerMessage::Error(err) => { - // bail!(TransferError::PeerError(err)); - // }, - // other => { - // let error = TransferError::unexpected_message("answer-v2", other); - // let _ = transit - // .send_record(&PeerMessage::Error(format!("{}", error)).ser_msgpack()) - // .await; - // bail!(error) - // }, - // } + cancel: impl Future, +) -> Result<(), TransferError> { + let peer_abilities = peer_version.transfer_v2.unwrap(); + futures::pin_mut!(cancel); + + /* Establish transit connection, close the Wormhole and switch to using the transit connection (msgpack instead of json) */ + let (mut transit, mut wormhole, cancel) = cancel::with_cancel_wormhole!( + wormhole, + run = async { + Ok(make_transit( + &mut wormhole, + true, + relay_hints, + transit_abilities, + peer_abilities.transit_abilities, + ) + .await? + .0) + }, + cancel, + ret_cancel = (), + ); + + cancel::with_cancel_transit!( + transit, + run = async { + /* Close the wormhole only here so that the operation may be cancelled */ + wormhole.close().await?; + + send_inner(&mut transit, offer, progress_handler).await + }, + cancel, + |err| PeerMessageV2::Error(err.to_string()).ser_msgpack(), + |msg| match PeerMessageV2::de_msgpack(msg)? { + PeerMessageV2::Error(err) => Ok(Some(err)), + _ => Ok(None), + }, + ret_cancel = (), + ); + + Ok(()) +} + +/** We've established the transit connection and closed the Wormhole */ +async fn send_inner( + transit: &mut transit::Transit, + offer: OfferSend, + mut progress_handler: impl FnMut(u64, u64) + 'static, +) -> Result<(), TransferError> { + transit.send_record(&{ + /* This must be split into two statements to appease the borrow checker (unfortunate side effect of borrow-through) */ + let message = PeerMessageV2::Offer((&offer).into()).ser_msgpack(); + message + }).await?; + + let files = match PeerMessageV2::de_msgpack(&transit.receive_record().await?)?.check_err()? 
{ + PeerMessageV2::Answer(answer) => answer.files, + other => { + bail!(TransferError::unexpected_message("answer", other)) + }, + }; + + let mut total_size = 0; + for file in &files { + if let Some((_, size)) = offer.get_file(&file.file) { + total_size += size; + } else { + bail!(TransferError::Protocol( + format!("Invalid file request: {}", file.file.join("/")).into() + )); + } + } + let mut total_sent = 0; + + // use zstd::stream::raw::Encoder; + // let zstd = Encoder::new(zstd::DEFAULT_COMPRESSION_LEVEL); + const BUFFER_LEN: usize = 16 * 1024; + let mut buffer = Box::new([0u8; BUFFER_LEN]); + + for AnswerMessageInner { + file, + offset, + sha256, + } in &files + { + let offset = *offset; + /* This must be split into two statements to appease the borrow checker (unfortunate side effect of borrow-through) */ + let content = (offer.get_file(&file).unwrap().0)(); + let mut content = content.await?; + let file = file.clone(); + + /* If they specified a hash, check our local file's contents */ + if let Some(sha256) = sha256 { + content.seek(std::io::SeekFrom::Start(offset)).await?; + let mut hasher = Sha256::default(); + futures::io::copy( + (&mut content).take(offset), + &mut futures::io::AllowStdIo::new(&mut hasher), + ) + .await?; + let our_hash = hasher.finalize_fixed(); + + /* If it doesn't match, start at 0 instead of the originally requested offset */ + if &*our_hash == &sha256[..] { + transit + .send_record( + &PeerMessageV2::FileStart(FileStart { + file, + start_at_offset: true, + }) + .ser_msgpack(), + ) + .await?; + } else { + transit + .send_record( + &PeerMessageV2::FileStart(FileStart { + file, + start_at_offset: false, + }) + .ser_msgpack(), + ) + .await?; + content.seek(std::io::SeekFrom::Start(0)).await?; + // offset = 0; TODO + } + } else { + content.seek(std::io::SeekFrom::Start(offset)).await?; + transit + .send_record( + &PeerMessageV2::FileStart(FileStart { + file, + start_at_offset: true, + }) + .ser_msgpack(), + ) + .await?; + } + + progress_handler(total_sent, total_size); + loop { + let n = content.read(&mut buffer[..]).await?; + let buffer = &buffer[..n]; + + if n == 0 { + // EOF + break; + } + + transit + .send_record( + &PeerMessageV2::Payload(Payload { + payload: buffer.into(), + }) + .ser_msgpack(), + ) + .await?; + total_sent += n as u64; + progress_handler(total_sent, total_size); + + if n < BUFFER_LEN { + break; + } + } + + transit + .send_record(&PeerMessageV2::FileEnd(FileEnd {}).ser_msgpack()) + .await?; + } + transit + .send_record(&PeerMessageV2::TransferAck(TransferAck {}).ser_msgpack()) + .await?; Ok(()) } -#[allow(unused_variables)] -pub async fn send_folder( - wormhole: Wormhole, +pub async fn request( + mut wormhole: Wormhole, relay_hints: Vec, - folder_path: N, - folder_name: M, - progress_handler: H, -) -> Result<(), TransferError> -where - N: Into, - M: Into, - H: FnMut(u64, u64) + 'static, -{ - unimplemented!() + peer_version: AppVersion, + transit_abilities: transit::Abilities, + cancel: impl Future, +) -> Result, TransferError> { + let peer_abilities = peer_version.transfer_v2.unwrap(); + futures::pin_mut!(cancel); + + /* Establish transit connection, close the Wormhole and switch to using the transit connection (msgpack instead of json) */ + let ((mut transit, info), mut wormhole, cancel) = cancel::with_cancel_wormhole!( + wormhole, + run = async { + make_transit( + &mut wormhole, + false, + relay_hints, + transit_abilities, + peer_abilities.transit_abilities, + ) + .await + }, + cancel, + ret_cancel = None, + ); + + let (offer, transit) = 
cancel::with_cancel_transit!( + transit, + run = async { + /* Close the wormhole only here so that the `.await` is scoped within cancellation */ + wormhole.close().await?; + + let offer = + match PeerMessageV2::de_msgpack(&transit.receive_record().await?)?.check_err()? { + PeerMessageV2::Offer(offer) => offer, + other => { + bail!(TransferError::unexpected_message("offer", other)) + }, + }; + + Ok(offer) + }, + cancel, + |err| PeerMessageV2::Error(err.to_string()).ser_msgpack(), + |msg| match PeerMessageV2::de_msgpack(msg)? { + PeerMessageV2::Error(err) => Ok(Some(err)), + _ => Ok(None), + }, + ret_cancel = None, + ); + + Ok(Some(ReceiveRequest::new(transit, offer, info))) +} + +/** + * A pending files send offer from the other side + * + * You *should* consume this object, either by calling [`accept`](ReceiveRequest::accept) or [`reject`](ReceiveRequest::reject). + */ +#[must_use] +pub struct ReceiveRequest { + transit: Transit, + offer: Arc, + info: transit::TransitInfo, +} + +impl ReceiveRequest { + pub fn new(transit: Transit, offer: Offer, info: transit::TransitInfo) -> Self { + Self { + transit, + offer: Arc::new(offer), + info, + } + } + + /** The offer we got */ + pub fn offer(&self) -> Arc { + self.offer.clone() + } + + /** + * Accept the file offer + * + * This will transfer the file and save it on disk. + */ + pub async fn accept( + self, + transit_handler: impl FnOnce(transit::TransitInfo), + answer: OfferAccept, + progress_handler: impl FnMut(u64, u64) + 'static, + cancel: impl Future, + ) -> Result<(), TransferError> { + transit_handler(self.info); + futures::pin_mut!(cancel); + + let mut transit = self.transit; + cancel::with_cancel_transit!( + transit, + run = async { + transit.send_record(&{ + /* This must be split into two statements to appease the borrow checker (unfortunate side effect of borrow-through) */ + let msg = PeerMessageV2::Answer(AnswerMessage { + files: answer.iter_files() + .map(|(path, inner, _size)| AnswerMessageInner { + file: path, + offset: inner.offset, + sha256: inner.sha256, + }) + .collect(), + }).ser_msgpack(); + msg + }).await?; + + receive_inner(&mut transit, &self.offer, answer, progress_handler).await + }, + cancel, + |err| PeerMessageV2::Error(err.to_string()).ser_msgpack(), + |msg| match PeerMessageV2::de_msgpack(msg)? { + PeerMessageV2::Error(err) => Ok(Some(err)), + _ => Ok(None), + }, + ret_cancel = (), + ); + Ok(()) + } + + /** + * Reject the file offer + * + * This will send an error message to the other side so that it knows the transfer failed. 
+ */ + pub async fn reject(mut self) -> Result<(), TransferError> { + self.transit + .send_record(&PeerMessageV2::Error("transfer rejected".into()).ser_msgpack()) + .await?; + self.transit.flush().await?; + + Ok(()) + } +} + +/** We've established the transit connection and closed the Wormhole */ +async fn receive_inner( + transit: &mut transit::Transit, + offer: &Arc, + our_answer: OfferAccept, + mut progress_handler: impl FnMut(u64, u64) + 'static, +) -> Result<(), TransferError> { + /* This does not check for file sizes, but should be good enough + * (failures will eventually lead to protocol errors later on anyways) + */ + assert!( + our_answer + .iter_file_paths() + .all(|path| offer.get_file(&path).is_some()), + "Mismatch between offer and accept: accept must be a true subset of offer" + ); + let n_accepted = our_answer.iter_file_paths().count(); + let total_size = our_answer + .iter_files() + .map(|(_path, _inner, size)| size) + .sum::(); + let mut total_received = 0; + + /* The receive loop */ + for (i, (file, answer, size)) in our_answer.into_iter_files().enumerate() { + let file_start = match PeerMessageV2::de_msgpack(&transit.receive_record().await?)? + .check_err()? + { + PeerMessageV2::FileStart(file_start) => file_start, + PeerMessageV2::TransferAck(_) => { + bail!(TransferError::Protocol(format!("Unexpected message: got 'transfer-ack' but expected {} more 'file-start' messages", n_accepted - i).into_boxed_str())) + }, + other => { + bail!(TransferError::unexpected_message("file-start", other)) + }, + }; + ensure!( + file_start.file == file, + TransferError::Protocol( + format!( + "Unexpected file: got file {} but expected {}", + file_start.file.join("/"), + file.join("/"), + ) + .into_boxed_str() + ) + ); + + let mut content; + let mut received_size = 0; + if file_start.start_at_offset { + content = (answer.content)(true).await?; + let offset = answer.offset; + received_size = offset; + } else { + content = (answer.content)(false).await?; + } + + progress_handler(total_received, total_size); + loop { + let payload = + match PeerMessageV2::de_msgpack(&transit.receive_record().await?)?.check_err()? { + PeerMessageV2::Payload(payload) => payload.payload, + PeerMessageV2::FileEnd(_) => { + bail!(TransferError::Protocol( + format!( + "Unexpected message: got 'file-end' but expected {} more payload bytes", + size - received_size, + ) + .into_boxed_str() + )) + }, + other => { + bail!(TransferError::unexpected_message("payload", other)) + }, + }; + + content.write_all(&payload).await?; + received_size += payload.len() as u64; + total_received += payload.len() as u64; + progress_handler(total_received, total_size); + + if received_size == size { + break; + } else if received_size >= size { + /* `received_size` must never become greater than `size` or we might panic on an integer underflow in the next iteration + * (only on an unhappy path, but still). Also, the progress bar might not appreciate. + */ + bail!(TransferError::Protocol( + format!( + "File too large: expected only {size} bytes, got at least {} more", + size - received_size + ) + .into_boxed_str() + )) + } + } + + content.close().await?; + + let _end = match PeerMessageV2::de_msgpack(&transit.receive_record().await?)?.check_err()? { + PeerMessageV2::FileEnd(end) => end, + other => { + bail!(TransferError::unexpected_message("file-end", other)) + }, + }; + } + + let _transfer_ack = match PeerMessageV2::de_msgpack(&transit.receive_record().await?)? + .check_err()? 
+ { + PeerMessageV2::TransferAck(transfer_ack) => transfer_ack, + PeerMessageV2::FileStart(_) => { + bail!(TransferError::Protocol( + format!("Unexpected message: got 'file-start' but did not expect any more files") + .into_boxed_str() + )) + }, + other => { + bail!(TransferError::unexpected_message("transfer-ack", other)) + }, + }; + + Ok(()) } diff --git a/src/transit.rs b/src/transit.rs index 76ac8b8b..c2ea7404 100644 --- a/src/transit.rs +++ b/src/transit.rs @@ -13,31 +13,37 @@ //! **Notice:** while the resulting TCP connection is naturally bi-directional, the handshake is not symmetric. There *must* be one //! "leader" side and one "follower" side (formerly called "sender" and "receiver"). -use crate::{Key, KeyPurpose}; +use crate::{util, Key, KeyPurpose}; +use derive_more::Display; use serde_derive::{Deserialize, Serialize}; -use async_std::{ - io::{prelude::WriteExt, ReadExt}, - net::{TcpListener, TcpStream}, -}; +#[cfg(not(target_family = "wasm"))] +use async_std::net::{TcpListener, TcpStream}; #[allow(unused_imports)] /* We need them for the docs */ -use futures::{future::TryFutureExt, Sink, SinkExt, Stream, StreamExt, TryStreamExt}; +use futures::{ + future::FutureExt, + future::TryFutureExt, + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + Sink, SinkExt, Stream, StreamExt, TryStreamExt, +}; use log::*; use std::{ collections::HashSet, - net::{IpAddr, SocketAddr, ToSocketAddrs}, + net::{IpAddr, SocketAddr}, sync::Arc, }; -use xsalsa20poly1305 as secretbox; -use xsalsa20poly1305::aead::{Aead, NewAead}; mod crypto; +mod transport; +use crypto::TransitHandshakeError; +use transport::{TransitTransport, TransitTransportRx, TransitTransportTx}; /// ULR to a default hosted relay server. Please don't abuse or DOS. pub const DEFAULT_RELAY_SERVER: &str = "tcp://transit.magic-wormhole.io:4001"; // No need to make public, it's hard-coded anyways (: // Open an issue if you want an API for this // Use for non-production testing +#[cfg(not(target_family = "wasm"))] const PUBLIC_STUN_SERVER: &str = "stun.piegames.de:3478"; #[derive(Debug)] @@ -64,6 +70,13 @@ pub enum TransitConnectError { #[source] std::io::Error, ), + #[cfg(target_family = "wasm")] + #[error("WASM error")] + WASM( + #[from] + #[source] + ws_stream_wasm::WsErr, + ), } #[derive(Debug, thiserror::Error)] @@ -79,6 +92,13 @@ pub enum TransitError { #[source] std::io::Error, ), + #[cfg(target_family = "wasm")] + #[error("WASM error")] + WASM( + #[from] + #[source] + ws_stream_wasm::WsErr, + ), } impl From<()> for TransitError { @@ -195,18 +215,6 @@ impl<'de> serde::Deserialize<'de> for Abilities { where D: serde::Deserializer<'de>, { - #[derive(Deserialize)] - #[serde(rename_all = "kebab-case", tag = "type")] - enum Ability { - DirectTcpV1, - RelayV1, - RelayV2, - #[cfg(all())] - NoiseCryptoV1, - #[serde(other)] - Other, - } - let mut abilities = Self::default(); /* Specifying a hint multiple times is undefined behavior. Here, we simply merge all features. */ for ability in as serde::Deserialize>::deserialize(de)? 
{ @@ -240,7 +248,8 @@ enum HintSerde { } /** Information about how to find a peer */ -#[derive(Clone, Debug, Default)] +#[derive(Clone, Display, Debug, Default, PartialEq)] +#[display(fmt = "Hints(direct: {:?}, relay: {:?})", "&direct_tcp", "&relay")] pub struct Hints { /** Hints for direct connection */ pub direct_tcp: HashSet, @@ -532,6 +541,7 @@ impl<'de> serde::Deserialize<'de> for RelayHint { } } +use crate::core::Ability; use std::convert::{TryFrom, TryInto}; impl TryFrom<&DirectHint> for IpAddr { @@ -554,80 +564,33 @@ impl TryFrom<&DirectHint> for SocketAddr { } } +/// Direct or relay #[derive(Clone, Debug, Eq, PartialEq)] #[non_exhaustive] -pub enum TransitInfo { +pub enum ConnectionType { + /// We are directly connected to our peer Direct, + /// We are connected to a relay server, and may even know its name Relay { name: Option }, } -type TransitConnection = (TcpStream, TransitInfo); - -fn set_socket_opts(socket: &socket2::Socket) -> std::io::Result<()> { - socket.set_nonblocking(true)?; - - /* See https://stackoverflow.com/a/14388707/6094756. - * On most BSD and Linux systems, we need both REUSEADDR and REUSEPORT; - * and if they don't support the latter we won't compile. - * On Windows, there is only REUSEADDR but it does what we want. - */ - socket.set_reuse_address(true)?; - #[cfg(all(unix, not(any(target_os = "solaris", target_os = "illumos"))))] - { - socket.set_reuse_port(true)?; - } - #[cfg(not(any( - all(unix, not(any(target_os = "solaris", target_os = "illumos"))), - target_os = "windows" - )))] - { - compile_error!("Your system is not supported yet, please raise an error"); - } - - Ok(()) +/// Metadata for the established transit connection +#[derive(Clone, Debug, Eq, PartialEq)] +#[non_exhaustive] +pub struct TransitInfo { + /// Whether we are connected directly or via a relay server + pub conn_type: ConnectionType, + /// Target address of our connection. This may be our peer, or the relay server. + /// This says nothing about the actual transport protocol used. + #[cfg(not(target_family = "wasm"))] + pub peer_addr: SocketAddr, + // Prevent exhaustive destructuring for future proofing + _unused: (), } -/** - * Bind to a port with SO_REUSEADDR, connect to the destination and then hide the blood behind a pretty [`async_std::net::TcpStream`] - * - * We want an `async_std::net::TcpStream`, but with SO_REUSEADDR set. - * The former is just a wrapper around `async_io::Async`, of which we - * copy the `connect` method to add a statement that will set the socket flag. - * See https://github.com/smol-rs/async-net/issues/20. - */ -async fn connect_custom( - local_addr: &socket2::SockAddr, - dest_addr: &socket2::SockAddr, -) -> std::io::Result { - log::debug!("Binding to {}", local_addr.as_socket().unwrap()); - let socket = socket2::Socket::new(socket2::Domain::IPV6, socket2::Type::STREAM, None)?; - /* Set our custum options */ - set_socket_opts(&socket)?; - - socket.bind(local_addr)?; - - /* Initiate connect */ - match socket.connect(dest_addr) { - Ok(_) => {}, - #[cfg(unix)] - Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {}, - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}, - Err(err) => return Err(err), - } - - let stream = async_io::Async::new(std::net::TcpStream::from(socket))?; - /* The stream becomes writable when connected. */ - stream.writable().await?; - - /* Check if there was an error while connecting. 
*/ - stream - .get_ref() - .take_error() - .and_then(|maybe_err| maybe_err.map_or(Ok(()), Result::Err))?; - /* Convert our mess to `async_std::net::TcpStream */ - Ok(stream.into_inner()?.into()) -} +type TransitConnection = (Box, TransitInfo); +#[cfg(not(target_family = "wasm"))] #[derive(Debug, thiserror::Error)] enum StunError { #[error("No IPv4 addresses were found for the selected STUN server")] @@ -650,90 +613,6 @@ enum StunError { ), } -/** Perform a STUN query to get the external IP address */ -async fn get_external_ip() -> Result<(SocketAddr, TcpStream), StunError> { - let mut socket = connect_custom( - &"[::]:0".parse::().unwrap().into(), - &PUBLIC_STUN_SERVER - .to_socket_addrs()? - /* If you find yourself behind a NAT66, open an issue */ - .find(|x| x.is_ipv4()) - /* TODO add a helper method to stdlib for this */ - .map(|addr| match addr { - SocketAddr::V4(v4) => { - SocketAddr::new(IpAddr::V6(v4.ip().to_ipv6_mapped()), v4.port()) - }, - SocketAddr::V6(_) => unreachable!(), - }) - .ok_or(StunError::ServerIsV6Only)? - .into(), - ) - .await?; - - use bytecodec::{DecodeExt, EncodeExt}; - use stun_codec::{ - rfc5389::{ - self, - attributes::{MappedAddress, Software, XorMappedAddress}, - Attribute, - }, - Message, MessageClass, MessageDecoder, MessageEncoder, TransactionId, - }; - - fn get_binding_request() -> Result, bytecodec::Error> { - use rand::Rng; - let random_bytes = rand::thread_rng().gen::<[u8; 12]>(); - - let mut message = Message::new( - MessageClass::Request, - rfc5389::methods::BINDING, - TransactionId::new(random_bytes), - ); - - message.add_attribute(Attribute::Software(Software::new( - "magic-wormhole-rust".to_owned(), - )?)); - - // Encodes the message - let mut encoder = MessageEncoder::new(); - let bytes = encoder.encode_into_bytes(message.clone())?; - Ok(bytes) - } - - fn decode_address(buf: &[u8]) -> Result, bytecodec::Error> { - let mut decoder = MessageDecoder::::new(); - let decoded = decoder.decode_from_bytes(buf)??; - - let external_addr1 = decoded - .get_attribute::() - .map(|x| x.address()); - //let external_addr2 = decoded.get_attribute::().map(|x|x.address()); - let external_addr3 = decoded - .get_attribute::() - .map(|x| x.address()); - let external_addr = external_addr1 - // .or(external_addr2) - .or(external_addr3); - - Ok(external_addr) - } - - /* Connect the plugs */ - - socket.write_all(get_binding_request()?.as_ref()).await?; - - let mut buf = [0u8; 256]; - /* Read header first */ - socket.read_exact(&mut buf[..20]).await?; - let len: u16 = u16::from_be_bytes([buf[2], buf[3]]); - /* Read the rest of the message */ - socket.read_exact(&mut buf[20..][..len as usize]).await?; - let external_addr = - decode_address(&buf[..20 + len as usize])?.ok_or(StunError::ServerNoResponse)?; - - Ok((external_addr, socket)) -} - /// Utility method that logs information of the transit result /// /// Example usage: @@ -741,31 +620,38 @@ async fn get_external_ip() -> Result<(SocketAddr, TcpStream), StunError> { /// ```no_run /// use magic_wormhole as mw; /// # #[async_std::main] async fn main() -> Result<(), mw::transit::TransitConnectError> { -/// # let derived_key = todo!(); -/// # let their_abilities = todo!(); -/// # let their_hints = todo!(); -/// let connector: mw::transit::TransitConnector = todo!("transit::init(…).await?"); -/// let (mut transit, info, addr) = connector +/// # let derived_key = unimplemented!(); +/// # let their_abilities = unimplemented!(); +/// # let their_hints = unimplemented!(); +/// let connector: mw::transit::TransitConnector = 
unimplemented!("transit::init(…).await?"); +/// let (mut transit, info) = connector /// .leader_connect(derived_key, their_abilities, their_hints) /// .await?; -/// mw::transit::log_transit_connection(info, addr); +/// mw::transit::log_transit_connection(info); /// # Ok(()) /// # } /// ``` -pub fn log_transit_connection(info: TransitInfo, peer_addr: SocketAddr) { - match info { - TransitInfo::Direct => { - log::info!("Established direct transit connection to '{}'", peer_addr,); +#[cfg(not(target_family = "wasm"))] +pub fn log_transit_connection(info: TransitInfo) { + match info.conn_type { + ConnectionType::Direct => { + log::info!( + "Established direct transit connection to '{}'", + info.peer_addr, + ); }, - TransitInfo::Relay { name: Some(name) } => { + ConnectionType::Relay { name: Some(name) } => { log::info!( "Established transit connection via relay '{}' ({})", name, - peer_addr, + info.peer_addr, ); }, - TransitInfo::Relay { name: None } => { - log::info!("Established transit connection via relay ({})", peer_addr,); + ConnectionType::Relay { name: None } => { + log::info!( + "Established transit connection via relay ({})", + info.peer_addr, + ); }, } } @@ -781,22 +667,24 @@ pub async fn init( relay_hints: Vec, ) -> Result { let mut our_hints = Hints::default(); - let mut listener = None; + #[cfg(not(target_family = "wasm"))] + let mut sockets = None; if let Some(peer_abilities) = peer_abilities { abilities = abilities.intersect(&peer_abilities); } /* Detect our IP addresses if the ability is enabled */ + #[cfg(not(target_family = "wasm"))] if abilities.can_direct() { let create_sockets = async { /* Do a STUN query to get our public IP. If it works, we must reuse the same socket (port) * so that we will be NATted to the same port again. If it doesn't, simply bind a new socket * and use that instead. */ - let socket: MaybeConnectedSocket = match async_std::future::timeout( + let socket: MaybeConnectedSocket = match util::timeout( std::time::Duration::from_secs(4), - get_external_ip(), + transport::tcp_get_external_ip(), ) .await .map_err(|_| StunError::Timeout) @@ -820,7 +708,7 @@ pub async fn init( log::warn!("Failed to get external address via STUN, {}", err); let socket = socket2::Socket::new(socket2::Domain::IPV6, socket2::Type::STREAM, None)?; - set_socket_opts(&socket)?; + transport::set_socket_opts(&socket)?; socket.bind(&"[::]:0".parse::().unwrap().into())?; log::debug!( @@ -838,11 +726,11 @@ pub async fn init( * the port. In theory, we could, but it really confused the kernel to the point * of `accept` calls never returning again. */ - let socket2 = TcpListener::bind("[::]:0").await?; + let listener = TcpListener::bind("[::]:0").await?; /* Find our ports, iterate all our local addresses, combine them with the ports and that's our hints */ let port = socket.local_addr()?.as_socket().unwrap().port(); - let port2 = socket2.local_addr()?.port(); + let port2 = listener.local_addr()?.port(); our_hints.direct_tcp.extend( if_addrs::get_if_addrs()? 
.iter() @@ -861,12 +749,12 @@ pub async fn init( .into_iter() }), ); - log::debug!("Our socket for listening is {}", socket2.local_addr()?); + log::debug!("Our socket for listening is {}", listener.local_addr()?); - Ok::<_, std::io::Error>((socket, socket2)) + Ok::<_, std::io::Error>((socket, listener)) }; - listener = create_sockets + sockets = create_sockets .await // TODO replace with inspect_err once stable .map_err(|err| { @@ -881,12 +769,15 @@ pub async fn init( } Ok(TransitConnector { - sockets: listener, + #[cfg(not(target_family = "wasm"))] + sockets, our_abilities: abilities, our_hints: Arc::new(our_hints), }) } +/// Bound socket, maybe also connected. Guaranteed to have SO_REUSEADDR. +#[cfg(not(target_family = "wasm"))] #[derive(derive_more::From)] enum MaybeConnectedSocket { #[from] @@ -895,6 +786,7 @@ enum MaybeConnectedSocket { Stream(TcpStream), } +#[cfg(not(target_family = "wasm"))] impl MaybeConnectedSocket { fn local_addr(&self) -> std::io::Result { match &self { @@ -916,6 +808,7 @@ pub struct TransitConnector { * The first socket is the port from which we will start connection attempts. * For in case the user is behind no firewalls, we must also listen to the second socket. */ + #[cfg(not(target_family = "wasm"))] sockets: Option<(MaybeConnectedSocket, TcpListener)>, our_abilities: Abilities, our_hints: Arc, @@ -931,6 +824,29 @@ impl TransitConnector { &self.our_hints } + /** + * Forwards to either [`leader_connect`] or [`follower_connect`]. + * + * It usually is better to call the respective functions directly by their name, as it makes + * them less easy to confuse (confusion may still happen though). Nevertheless, sometimes it + * is desirable to use the same code for both sides and only track the side with a boolean. + */ + pub async fn connect( + self, + is_leader: bool, + transit_key: Key, + their_abilities: Abilities, + their_hints: Arc, + ) -> Result<(Transit, TransitInfo), TransitConnectError> { + if is_leader { + self.leader_connect(transit_key, their_abilities, their_hints) + .await + } else { + self.follower_connect(transit_key, their_abilities, their_hints) + .await + } + } + /** * Connect to the other side, as sender. */ @@ -939,23 +855,25 @@ impl TransitConnector { transit_key: Key, their_abilities: Abilities, their_hints: Arc, - ) -> Result<(Transit, TransitInfo, SocketAddr), TransitConnectError> { + ) -> Result<(Transit, TransitInfo), TransitConnectError> { let Self { + #[cfg(not(target_family = "wasm"))] sockets, our_abilities, our_hints, } = self; let transit_key = Arc::new(transit_key); - let start = std::time::Instant::now(); + let start = instant::Instant::now(); let mut connection_stream = Box::pin( - Self::connect( + Self::connect_inner( true, transit_key, our_abilities, our_hints, their_abilities, their_hints, + #[cfg(not(target_family = "wasm"))] sockets, ) .filter_map(|result| async { @@ -969,18 +887,16 @@ impl TransitConnector { }), ); - let (mut transit, mut host_type) = async_std::future::timeout( - std::time::Duration::from_secs(60), - connection_stream.next(), - ) - .await - .map_err(|_| { - log::debug!("`leader_connect` timed out"); - TransitConnectError::Handshake - })? - .ok_or(TransitConnectError::Handshake)?; + let (mut transit, mut finalizer, mut conn_info) = + util::timeout(std::time::Duration::from_secs(60), connection_stream.next()) + .await + .map_err(|_| { + log::debug!("`leader_connect` timed out"); + TransitConnectError::Handshake + })? 
+ .ok_or(TransitConnectError::Handshake)?; - if host_type != TransitInfo::Direct && our_abilities.can_direct() { + if conn_info.conn_type != ConnectionType::Direct && our_abilities.can_direct() { log::debug!( "Established transit connection over relay. Trying to find a direct connection …" ); @@ -994,12 +910,15 @@ impl TransitConnector { } else { elapsed.mul_f32(0.3) }; - let _ = async_std::future::timeout(to_wait, async { - while let Some((new_transit, new_host_type)) = connection_stream.next().await { + let _ = util::timeout(to_wait, async { + while let Some((new_transit, new_finalizer, new_conn_info)) = + connection_stream.next().await + { /* We already got a connection, so we're only interested in direct ones */ - if new_host_type == TransitInfo::Direct { + if new_conn_info.conn_type == ConnectionType::Direct { transit = new_transit; - host_type = new_host_type; + finalizer = new_finalizer; + conn_info = new_conn_info; log::debug!("Found direct connection; using that instead."); break; } @@ -1016,17 +935,22 @@ impl TransitConnector { */ std::mem::drop(connection_stream); - let (mut socket, finalizer) = transit; let (tx, rx) = finalizer - .handshake_finalize(&mut socket) + .handshake_finalize(&mut transit) .await .map_err(|e| { log::debug!("`handshake_finalize` failed: {e}"); TransitConnectError::Handshake })?; - let addr = socket.peer_addr().unwrap(); - Ok((Transit { socket, tx, rx }, host_type, addr)) + Ok(( + Transit { + socket: transit, + tx, + rx, + }, + conn_info, + )) } /** @@ -1037,8 +961,9 @@ impl TransitConnector { transit_key: Key, their_abilities: Abilities, their_hints: Arc, - ) -> Result<(Transit, TransitInfo, SocketAddr), TransitConnectError> { + ) -> Result<(Transit, TransitInfo), TransitConnectError> { let Self { + #[cfg(not(target_family = "wasm"))] sockets, our_abilities, our_hints, @@ -1046,13 +971,14 @@ impl TransitConnector { let transit_key = Arc::new(transit_key); let mut connection_stream = Box::pin( - Self::connect( + Self::connect_inner( false, transit_key, our_abilities, our_hints, their_abilities, their_hints, + #[cfg(not(target_family = "wasm"))] sockets, ) .filter_map(|result| async { @@ -1066,14 +992,13 @@ impl TransitConnector { }), ); - let transit = match async_std::future::timeout( + let transit = match util::timeout( std::time::Duration::from_secs(60), &mut connection_stream.next(), ) .await { - Ok(Some(((mut socket, finalizer), host_type))) => { - let addr = socket.peer_addr().unwrap(); + Ok(Some((mut socket, finalizer, conn_info))) => { let (tx, rx) = finalizer .handshake_finalize(&mut socket) .await @@ -1082,7 +1007,7 @@ impl TransitConnector { TransitConnectError::Handshake })?; - Ok((Transit { socket, tx, rx }, host_type, addr)) + Ok((Transit { socket, tx, rx }, conn_info)) }, Ok(None) | Err(_) => { log::debug!("`follower_connect` timed out"); @@ -1107,18 +1032,18 @@ impl TransitConnector { * If the receiving end of the channel for the results is closed before all futures in the return * value are cancelled/dropped. 
*/ - fn connect( + fn connect_inner( is_leader: bool, transit_key: Arc>, our_abilities: Abilities, our_hints: Arc, their_abilities: Abilities, their_hints: Arc, - socket: Option<(MaybeConnectedSocket, TcpListener)>, - ) -> impl Stream> - + 'static { - /* Have socket => can direct */ - assert!(socket.is_none() || our_abilities.can_direct()); + #[cfg(not(target_family = "wasm"))] sockets: Option<(MaybeConnectedSocket, TcpListener)>, + ) -> impl Stream> + 'static { + /* Have Some(sockets) → Can direct */ + #[cfg(not(target_family = "wasm"))] + assert!(sockets.is_none() || our_abilities.can_direct()); let cryptor = if our_abilities.can_noise_crypto() && their_abilities.can_noise_crypto() { log::debug!("Using noise protocol for encryption"); @@ -1138,17 +1063,25 @@ impl TransitConnector { /* Iterator of futures yielding a connection. They'll be then mapped with the handshake, collected into * a Vec and polled concurrently. */ + #[cfg(not(target_family = "wasm"))] use futures::future::BoxFuture; + #[cfg(target_family = "wasm")] + use futures::future::LocalBoxFuture as BoxFuture; type BoxIterator = Box>; - type ConnectorFuture = - BoxFuture<'static, Result>; + type ConnectorFuture = BoxFuture<'static, Result>; let mut connectors: BoxIterator = Box::new(std::iter::empty()); - /* Create direct connection sockets, if we support it. If peer doesn't support it, their list of hints will - * be empty and no entries will be pushed. - */ - let socket2 = if let Some((socket, socket2)) = socket { - let local_addr = Arc::new(socket.local_addr().unwrap()); + #[cfg(not(target_family = "wasm"))] + let (socket, listener) = sockets.unzip(); + #[cfg(not(target_family = "wasm"))] + if our_abilities.can_direct() && their_abilities.can_direct() { + let local_addr = socket.map(|socket| { + Arc::new( + socket + .local_addr() + .expect("This is guaranteed to be an IP socket"), + ) + }); /* Connect to each hint of the peer */ connectors = Box::new( connectors.chain( @@ -1158,46 +1091,13 @@ impl TransitConnector { .into_iter() /* Nobody should have that many IP addresses, even with NATing */ .take(50) - .map(move |hint| { - let local_addr = local_addr.clone(); - async move { - let dest_addr = SocketAddr::try_from(&hint)?; - log::debug!("Connecting directly to {}", dest_addr); - let socket = connect_custom(&local_addr, &dest_addr.into()).await?; - log::debug!("Connected to {}!", dest_addr); - Ok((socket, TransitInfo::Direct)) - } - }) - .map(|fut| Box::pin(fut) as ConnectorFuture), - ), - ) as BoxIterator; - Some(socket2) - } else if our_abilities.can_direct() { - /* Fallback: We did not manage to bind a listener but we can still connect to the peer's hints */ - connectors = Box::new( - connectors.chain( - their_hints - .direct_tcp - .clone() - .into_iter() - /* Nobody should have that many IP addresses, even with NATing */ - .take(50) - .map(move |hint| async move { - let dest_addr = SocketAddr::try_from(&hint)?; - log::debug!("Connecting directly to {}", dest_addr); - let socket = async_std::net::TcpStream::connect(&dest_addr).await?; - log::debug!("Connected to {}!", dest_addr); - Ok((socket, TransitInfo::Direct)) - }) + .map(move |hint| transport::connect_tcp_direct(local_addr.clone(), hint)) .map(|fut| Box::pin(fut) as ConnectorFuture), ), ) as BoxIterator; - None - } else { - None - }; + } - /* Relay hints. Make sure that both sides adverize it, since it is fine to support it without providing own hints. */ + /* Relay hints. 
Make sure that both sides advertise it, since it is fine to support it without providing own hints. */ if our_abilities.can_relay() && their_abilities.can_relay() { /* Collect intermediate into HashSet for deduplication */ let mut relay_hints = Vec::::new(); @@ -1206,26 +1106,14 @@ impl TransitConnector { hint.merge_into(&mut relay_hints); } - /* Take a relay hint and try to connect to it */ - async fn hint_connector( - host: DirectHint, - name: Option, - ) -> Result { - log::debug!("Connecting to relay {}", host); - let transit = TcpStream::connect((host.hostname.as_str(), host.port)) - .err_into::() - .await?; - log::debug!("Connected to {}!", host); - - Ok((transit, TransitInfo::Relay { name })) - } - - connectors = Box::new( - connectors.chain( + #[cfg(not(target_family = "wasm"))] + { + connectors = Box::new( + connectors.chain( relay_hints .into_iter() /* A hint may have multiple addresses pointing towards the server. This may be multiple - * domain aliases or different ports or an IPv6 or IPv4 address. We only need + * domain aliases or different ports or an IPv6 or IPv4 address. We only need * to connect to one of them, since they are considered equivalent. However, we * also want to be prepared for the rare case of one failing, thus we try to reach * up to three different addresses. To not flood the system with requests, we @@ -1235,37 +1123,83 @@ impl TransitConnector { .flat_map(|hint| { /* If the hint has no name, take the first domain name as fallback */ let name = hint.name - .or_else(|| { - /* Try to parse as IP address. We are only interested in human readable names (the IP address will be printed anyways) */ - hint.tcp.iter() + .or_else(|| { + /* Try to parse as IP address. We are only interested in human readable names (the IP address will be printed anyways) */ + hint.tcp.iter() .filter_map(|hint| match url::Host::parse(&hint.hostname) { Ok(url::Host::Domain(_)) => Some(hint.hostname.clone()), _ => None, }) .next() - }); + }); hint.tcp .into_iter() .take(3) .enumerate() .map(move |(i, h)| (i, h, name.clone())) - }) - .map(|(index, host, name)| async move { - async_std::task::sleep(std::time::Duration::from_secs( - index as u64 * 5, - )) - .await; - hint_connector(host, name).await - }) - .map(|fut| Box::pin(fut) as ConnectorFuture), - ), - ) as BoxIterator; + }) + .map(|(index, host, name)| async move { + util::sleep(std::time::Duration::from_secs( + index as u64 * 5, + )) + .await; + transport::connect_tcp_relay(host, name).await + }) + .map(|fut| Box::pin(fut) as ConnectorFuture), + ), + ) as BoxIterator; + } + + #[cfg(target_family = "wasm")] + { + connectors = Box::new( + connectors.chain( + relay_hints + .into_iter() + /* A hint may have multiple addresses pointing towards the server. This may be multiple + * domain aliases or different ports or an IPv6 or IPv4 address. We only need + * to connect to one of them, since they are considered equivalent. However, we + * also want to be prepared for the rare case of one failing, thus we try to reach + * up to three different addresses. To not flood the system with requests, we + * start them in a 5 seconds interval spread. If one of them succeeds, the remaining ones + * will be cancelled anyways. Note that a hint might not necessarily be reachable via TCP. + */ + .flat_map(|hint| { + /* If the hint has no name, take the first domain name as fallback */ + let name = hint.name + .or_else(|| { + /* Try to parse as IP address. 
We are only interested in human readable names (the IP address will be printed anyways) */ + hint.tcp.iter() + .filter_map(|hint| match url::Host::parse(&hint.hostname) { + Ok(url::Host::Domain(_)) => Some(hint.hostname.clone()), + _ => None, + }) + .next() + }); + hint.ws + .into_iter() + .take(3) + .enumerate() + .map(move |(i, u)| (i, u, name.clone())) + }) + .map(|(index, url, name)| async move { + util::sleep(std::time::Duration::from_secs( + index as u64 * 5, + )) + .await; + transport::connect_ws_relay(url, name).await + }) + .map(|fut| Box::pin(fut) as ConnectorFuture), + ), + ) as BoxIterator; + } } /* Do a handshake on all our found connections */ let transit_key2 = transit_key.clone(); let tside2 = tside.clone(); let cryptor2 = cryptor.clone(); + #[allow(unused_mut)] // For WASM targets let mut connectors = Box::new( connectors .map(move |fut| { @@ -1273,32 +1207,28 @@ impl TransitConnector { let tside = tside2.clone(); let cryptor = cryptor2.clone(); async move { - let (socket, host_type) = fut.await?; - let transit = handshake_exchange( + let (socket, conn_info) = fut.await?; + let (transit, finalizer) = handshake_exchange( is_leader, tside, socket, - &host_type, + &conn_info.conn_type, &*cryptor, transit_key, ) .await?; - Ok((transit, host_type)) + Ok((transit, finalizer, conn_info)) } }) .map(|fut| { - Box::pin(fut) - as BoxFuture< - Result<(HandshakeResult, TransitInfo), crypto::TransitHandshakeError>, - > + Box::pin(fut) as BoxFuture> }), ) - as BoxIterator< - BoxFuture>, - >; + as BoxIterator>>; /* Also listen on some port just in case. */ - if let Some(socket2) = socket2 { + #[cfg(not(target_family = "wasm"))] + if let Some(listener) = listener { connectors = Box::new( connectors.chain( std::iter::once(async move { @@ -1306,21 +1236,20 @@ impl TransitConnector { let tside = tside.clone(); let cryptor = cryptor.clone(); let connect = || async { - let (stream, peer) = socket2.accept().await?; + let (socket, peer) = listener.accept().await?; + let (socket, info) = + transport::wrap_tcp_connection(socket, ConnectionType::Direct)?; log::debug!("Got connection from {}!", peer); - let transit = handshake_exchange( + let (transit, finalizer) = handshake_exchange( is_leader, tside.clone(), - stream, - &TransitInfo::Direct, + socket, + &ConnectionType::Direct, &*cryptor, transit_key.clone(), ) .await?; - Result::<_, crypto::TransitHandshakeError>::Ok(( - transit, - TransitInfo::Direct, - )) + Result::<_, TransitHandshakeError>::Ok((transit, finalizer, info)) }; loop { match connect().await { @@ -1336,21 +1265,11 @@ impl TransitConnector { } }) .map(|fut| { - Box::pin(fut) - as BoxFuture< - Result< - (HandshakeResult, TransitInfo), - crypto::TransitHandshakeError, - >, - > + Box::pin(fut) as BoxFuture> }), ), ) - as BoxIterator< - BoxFuture< - Result<(HandshakeResult, TransitInfo), crypto::TransitHandshakeError>, - >, - >; + as BoxIterator>>; } connectors.collect::>() } @@ -1364,7 +1283,7 @@ impl TransitConnector { */ pub struct Transit { /** Raw transit connection */ - socket: TcpStream, + socket: Box, tx: Box, rx: Box, } @@ -1387,14 +1306,13 @@ impl Transit { } /** Convert the transit connection to a [`Stream`]/[`Sink`] pair */ + #[cfg(not(target_family = "wasm"))] pub fn split( self, ) -> ( impl futures::sink::Sink, Error = TransitError>, impl futures::stream::Stream, TransitError>>, ) { - use futures::io::AsyncReadExt; - let (reader, writer) = self.socket.split(); ( futures::sink::unfold( @@ -1414,7 +1332,11 @@ impl Transit { } } -type HandshakeResult = (TcpStream, Box); +type 
HandshakeResult = ( + Box, + Box, + TransitInfo, +); /** * Do a transit handshake exchange, to establish a direct connection. @@ -1428,20 +1350,18 @@ type HandshakeResult = (TcpStream, Box); async fn handshake_exchange( is_leader: bool, tside: Arc, - socket: TcpStream, - host_type: &TransitInfo, + mut socket: Box, + host_type: &ConnectionType, cryptor: &dyn crypto::TransitCryptoInit, key: Arc>, -) -> Result { - /* Set proper read and write timeouts. This will temporarily set the socket into blocking mode :/ */ - // https://github.com/async-rs/async-std/issues/499 - let socket = std::net::TcpStream::try_from(socket) - .expect("Internal error: this should not fail because we never cloned the socket"); - socket.set_write_timeout(Some(std::time::Duration::from_secs(120)))?; - socket.set_read_timeout(Some(std::time::Duration::from_secs(120)))?; - let mut socket: TcpStream = socket.into(); - - if host_type != &TransitInfo::Direct { +) -> Result< + ( + Box, + Box, + ), + TransitHandshakeError, +> { + if host_type != &ConnectionType::Direct { log::trace!("initiating relay handshake"); let sub_key = key.derive_subkey_from_purpose::("transit_relay_token"); @@ -1451,10 +1371,7 @@ async fn handshake_exchange( let mut rx = [0u8; 3]; socket.read_exact(&mut rx).await?; let ok_msg: [u8; 3] = *b"ok\n"; - ensure!( - ok_msg == rx, - crypto::TransitHandshakeError::RelayHandshakeFailed - ); + ensure!(ok_msg == rx, TransitHandshakeError::RelayHandshakeFailed); } let finalizer = if is_leader { diff --git a/src/transit/crypto.rs b/src/transit/crypto.rs index 0bf2975a..475316a5 100644 --- a/src/transit/crypto.rs +++ b/src/transit/crypto.rs @@ -1,12 +1,18 @@ -/// Cryptographic backbone of the Transit protocol -/// -/// This handles the encrypted handshakes during connection setup, then provides -/// a simple "encrypt/decrypt" abstraction that will be used for all messages. -use super::*; +//! Cryptographic backbone of the Transit protocol +//! +//! This handles the encrypted handshakes during connection setup, then provides +//! a simple "encrypt/decrypt" abstraction that will be used for all messages. + +use super::{ + TransitError, TransitKey, TransitRxKey, TransitTransport, TransitTransportRx, + TransitTransportTx, TransitTxKey, +}; use crate::Key; use async_trait::async_trait; -use futures::future::BoxFuture; +use futures::{future::BoxFuture, io::AsyncWriteExt}; use std::sync::Arc; +use xsalsa20poly1305 as secretbox; +use xsalsa20poly1305::aead::{Aead, NewAead}; /// Private, because we try multiple handshakes and only /// one needs to succeed @@ -37,6 +43,13 @@ pub(super) enum TransitHandshakeError { #[source] std::io::Error, ), + #[cfg(target_family = "wasm")] + #[error("WASM error")] + WASM( + #[from] + #[source] + ws_stream_wasm::WsErr, + ), } impl From<()> for TransitHandshakeError { @@ -45,57 +58,12 @@ impl From<()> for TransitHandshakeError { } } -/// Helper method for handshake: read a fixed number of bytes and make sure they are as expected -async fn read_expect( - socket: &mut (dyn futures::io::AsyncRead + Unpin + Send), - expected: &[u8], -) -> Result<(), TransitHandshakeError> { - let mut buffer = vec![0u8; expected.len()]; - socket.read_exact(&mut buffer).await?; - ensure!(buffer == expected, TransitHandshakeError::HandshakeFailed); - Ok(()) -} - -/// Helper method: read a four bytes length prefix then the appropriate number of bytes -async fn read_transit_message( - socket: &mut (dyn futures::io::AsyncRead + Unpin + Send), -) -> Result, std::io::Error> { - // 1. read 4 bytes from the stream. 
This represents the length of the encrypted packet. - let length = { - let mut length_arr: [u8; 4] = [0; 4]; - socket.read_exact(&mut length_arr[..]).await?; - u32::from_be_bytes(length_arr) as usize - }; - - // 2. read that many bytes into an array (or a vector?) - let mut buffer = Vec::with_capacity(length); - let len = socket.take(length as u64).read_to_end(&mut buffer).await?; - use std::io::{Error, ErrorKind}; - ensure!( - len == length, - Error::new(ErrorKind::UnexpectedEof, "failed to read whole message") - ); - Ok(buffer) -} - -/// Helper method: write the message length then the message -async fn write_transit_message( - socket: &mut (dyn futures::io::AsyncWrite + Unpin + Send), - message: &[u8], -) -> Result<(), std::io::Error> { - // send the encrypted record - socket - .write_all(&(message.len() as u32).to_be_bytes()) - .await?; - socket.write_all(message).await -} - /// The Transit protocol has the property that the last message of the handshake is from the leader /// and confirms the usage of that specific connection. This trait represents that specific type state. pub(super) trait TransitCryptoInitFinalizer: Send { fn handshake_finalize( self: Box, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> BoxFuture>; } @@ -104,7 +72,7 @@ pub(super) trait TransitCryptoInitFinalizer: Send { impl TransitCryptoInitFinalizer for DynTransitCrypto { fn handshake_finalize( self: Box, - _socket: &mut TcpStream, + _socket: &mut dyn TransitTransport, ) -> BoxFuture> { Box::pin(futures::future::ready(Ok(*self))) } @@ -116,11 +84,11 @@ pub(super) trait TransitCryptoInit: Send + Sync { // Yes, this method returns a nested future. TODO explain async fn handshake_leader( &self, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> Result, TransitHandshakeError>; async fn handshake_follower( &self, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> Result, TransitHandshakeError>; } @@ -140,7 +108,7 @@ pub struct SecretboxInit { impl TransitCryptoInit for SecretboxInit { async fn handshake_leader( &self, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> Result, TransitHandshakeError> { // 9. create record keys let rkey = self @@ -171,7 +139,7 @@ impl TransitCryptoInit for SecretboxInit { .to_hex() ); assert_eq!(expected_rx_handshake.len(), 89); - read_expect(socket, expected_rx_handshake.as_bytes()).await?; + socket.read_expect(expected_rx_handshake.as_bytes()).await?; struct Finalizer { skey: Key, @@ -181,7 +149,7 @@ impl TransitCryptoInit for SecretboxInit { impl TransitCryptoInitFinalizer for Finalizer { fn handshake_finalize( self: Box, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> BoxFuture> { Box::pin(async move { socket.write_all(b"go\n").await?; @@ -205,7 +173,7 @@ impl TransitCryptoInit for SecretboxInit { async fn handshake_follower( &self, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> Result, TransitHandshakeError> { // 9. create record keys /* The order here is correct. 
The "sender" and "receiver" side are a misnomer and should be called @@ -240,7 +208,7 @@ impl TransitCryptoInit for SecretboxInit { .to_hex(), ); assert_eq!(expected_tx_handshake.len(), 90); - read_expect(socket, expected_tx_handshake.as_bytes()).await?; + socket.read_expect(expected_tx_handshake.as_bytes()).await?; Ok(Box::new(( Box::new(SecretboxCryptoEncrypt { @@ -279,12 +247,14 @@ pub struct NoiseInit { impl TransitCryptoInit for NoiseInit { async fn handshake_leader( &self, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> Result, TransitHandshakeError> { socket .write_all(b"Magic-Wormhole Dilation Handshake v1 Leader\n\n") .await?; - read_expect(socket, b"Magic-Wormhole Dilation Handshake v1 Follower\n\n").await?; + socket + .read_expect(b"Magic-Wormhole Dilation Handshake v1 Follower\n\n") + .await?; let mut handshake: NoiseHandshakeState = { let mut builder = noise_protocol::HandshakeStateBuilder::new(); @@ -296,16 +266,18 @@ impl TransitCryptoInit for NoiseInit { handshake.push_psk(&self.key); // → psk, e - write_transit_message(socket, &handshake.write_message_vec(&[])?).await?; + socket + .write_transit_message(&handshake.write_message_vec(&[])?) + .await?; // ← e, ee - handshake.read_message(&read_transit_message(socket).await?, &mut [])?; + handshake.read_message(&socket.read_transit_message().await?, &mut [])?; assert!(handshake.completed()); let (tx, mut rx) = handshake.get_ciphers(); // ← "" - let peer_confirmation_message = rx.decrypt_vec(&read_transit_message(socket).await?)?; + let peer_confirmation_message = rx.decrypt_vec(&socket.read_transit_message().await?)?; ensure!( peer_confirmation_message.is_empty(), TransitHandshakeError::HandshakeFailed @@ -319,11 +291,13 @@ impl TransitCryptoInit for NoiseInit { impl TransitCryptoInitFinalizer for Finalizer { fn handshake_finalize( mut self: Box, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> BoxFuture> { Box::pin(async move { // → "" - write_transit_message(socket, &self.tx.encrypt_vec(&[])).await?; + socket + .write_transit_message(&self.tx.encrypt_vec(&[])) + .await?; Ok::<_, TransitHandshakeError>(( Box::new(NoiseCryptoEncrypt { tx: self.tx }) @@ -340,12 +314,14 @@ impl TransitCryptoInit for NoiseInit { async fn handshake_follower( &self, - socket: &mut TcpStream, + socket: &mut dyn TransitTransport, ) -> Result, TransitHandshakeError> { socket .write_all(b"Magic-Wormhole Dilation Handshake v1 Follower\n\n") .await?; - read_expect(socket, b"Magic-Wormhole Dilation Handshake v1 Leader\n\n").await?; + socket + .read_expect(b"Magic-Wormhole Dilation Handshake v1 Leader\n\n") + .await?; let mut handshake: NoiseHandshakeState = { let mut builder = noise_protocol::HandshakeStateBuilder::new(); @@ -357,20 +333,22 @@ impl TransitCryptoInit for NoiseInit { handshake.push_psk(&self.key); // ← psk, e - handshake.read_message(&read_transit_message(socket).await?, &mut [])?; + handshake.read_message(&socket.read_transit_message().await?, &mut [])?; // → e, ee - write_transit_message(socket, &handshake.write_message_vec(&[])?).await?; + socket + .write_transit_message(&handshake.write_message_vec(&[])?) 
+ .await?; assert!(handshake.completed()); // Warning: rx and tx are swapped here (read the `get_ciphers` doc carefully) let (mut rx, mut tx) = handshake.get_ciphers(); // → "" - write_transit_message(socket, &tx.encrypt_vec(&[])).await?; + socket.write_transit_message(&tx.encrypt_vec(&[])).await?; // ← "" - let peer_confirmation_message = rx.decrypt_vec(&read_transit_message(socket).await?)?; + let peer_confirmation_message = rx.decrypt_vec(&socket.read_transit_message().await?)?; ensure!( peer_confirmation_message.is_empty(), TransitHandshakeError::HandshakeFailed @@ -386,19 +364,19 @@ impl TransitCryptoInit for NoiseInit { type DynTransitCrypto = (Box, Box); #[async_trait] -pub trait TransitCryptoEncrypt: Send { +pub(super) trait TransitCryptoEncrypt: Send { async fn encrypt( &mut self, - socket: &mut (dyn futures::io::AsyncWrite + Unpin + Send), + socket: &mut dyn TransitTransportTx, plaintext: &[u8], ) -> Result<(), TransitError>; } #[async_trait] -pub trait TransitCryptoDecrypt: Send { +pub(super) trait TransitCryptoDecrypt: Send { async fn decrypt( &mut self, - socket: &mut (dyn futures::io::AsyncRead + Unpin + Send), + socket: &mut dyn TransitTransportRx, ) -> Result, TransitError>; } @@ -424,7 +402,7 @@ struct SecretboxCryptoDecrypt { impl TransitCryptoEncrypt for SecretboxCryptoEncrypt { async fn encrypt( &mut self, - socket: &mut (dyn futures::io::AsyncWrite + Unpin + Send), + socket: &mut dyn TransitTransportTx, plaintext: &[u8], ) -> Result<(), TransitError> { let nonce = &mut self.snonce; @@ -457,11 +435,11 @@ impl TransitCryptoEncrypt for SecretboxCryptoEncrypt { impl TransitCryptoDecrypt for SecretboxCryptoDecrypt { async fn decrypt( &mut self, - socket: &mut (dyn futures::io::AsyncRead + Unpin + Send), + socket: &mut dyn TransitTransportRx, ) -> Result, TransitError> { let nonce = &mut self.rnonce; - let enc_packet = read_transit_message(socket).await?; + let enc_packet = socket.read_transit_message().await?; use std::io::{Error, ErrorKind}; ensure!( @@ -508,10 +486,12 @@ struct NoiseCryptoDecrypt { impl TransitCryptoEncrypt for NoiseCryptoEncrypt { async fn encrypt( &mut self, - socket: &mut (dyn futures::io::AsyncWrite + Unpin + Send), + socket: &mut dyn TransitTransportTx, plaintext: &[u8], ) -> Result<(), TransitError> { - write_transit_message(socket, &self.tx.encrypt_vec(plaintext)).await?; + socket + .write_transit_message(&self.tx.encrypt_vec(plaintext)) + .await?; Ok(()) } } @@ -520,9 +500,9 @@ impl TransitCryptoEncrypt for NoiseCryptoEncrypt { impl TransitCryptoDecrypt for NoiseCryptoDecrypt { async fn decrypt( &mut self, - socket: &mut (dyn futures::io::AsyncRead + Unpin + Send), + socket: &mut dyn TransitTransportRx, ) -> Result, TransitError> { - let plaintext = self.rx.decrypt_vec(&read_transit_message(socket).await?)?; + let plaintext = self.rx.decrypt_vec(&socket.read_transit_message().await?)?; Ok(plaintext.into_boxed_slice()) } } diff --git a/src/transit/transport.rs b/src/transit/transport.rs new file mode 100644 index 00000000..1e3839ec --- /dev/null +++ b/src/transit/transport.rs @@ -0,0 +1,304 @@ +//! 
Helper functions abstracting away different transport protocols for Transit + +use super::{ConnectionType, TransitConnection, TransitHandshakeError, TransitInfo}; +#[cfg(not(target_family = "wasm"))] +use super::{DirectHint, StunError}; + +#[cfg(not(target_family = "wasm"))] +use async_std::net::TcpStream; +use async_trait::async_trait; +use futures::{ + future::TryFutureExt, + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, +}; +#[cfg(not(target_family = "wasm"))] +use std::{ + net::{IpAddr, SocketAddr, ToSocketAddrs}, + sync::Arc, +}; + +#[async_trait] +pub(super) trait TransitTransportRx: AsyncRead + std::any::Any + Unpin + Send { + /// Helper method for handshake: read a fixed number of bytes and make sure they are as expected + async fn read_expect(&mut self, expected: &[u8]) -> Result<(), TransitHandshakeError> { + let mut buffer = vec![0u8; expected.len()]; + self.read_exact(&mut buffer).await?; + ensure!(buffer == expected, TransitHandshakeError::HandshakeFailed); + Ok(()) + } + + /// Helper method: read a four bytes length prefix then the appropriate number of bytes + async fn read_transit_message(&mut self) -> Result, std::io::Error> { + // 1. read 4 bytes from the stream. This represents the length of the encrypted packet. + let length = { + let mut length_arr: [u8; 4] = [0; 4]; + self.read_exact(&mut length_arr[..]).await?; + u32::from_be_bytes(length_arr) as usize + }; + + // 2. read that many bytes into an array (or a vector?) + let mut buffer = Vec::with_capacity(length); + let len = self.take(length as u64).read_to_end(&mut buffer).await?; + use std::io::{Error, ErrorKind}; + ensure!( + len == length, + Error::new(ErrorKind::UnexpectedEof, "failed to read whole message") + ); + Ok(buffer) + } +} + +#[async_trait] +pub(super) trait TransitTransportTx: AsyncWrite + std::any::Any + Unpin + Send { + /// Helper method: write the message length then the message + async fn write_transit_message(&mut self, message: &[u8]) -> Result<(), std::io::Error> { + // send the encrypted record + self.write_all(&(message.len() as u32).to_be_bytes()) + .await?; + self.write_all(message).await + } +} + +/// Trait abstracting our socket used for communicating over the wire. +/// +/// Will be primarily instantiated by either a TCP or web socket. Custom methods +/// will be added in the future. +pub(super) trait TransitTransport: TransitTransportRx + TransitTransportTx {} + +impl TransitTransportRx for T where T: AsyncRead + std::any::Any + Unpin + Send {} +impl TransitTransportTx for T where T: AsyncWrite + std::any::Any + Unpin + Send {} +impl TransitTransport for T where T: AsyncRead + AsyncWrite + std::any::Any + Unpin + Send {} + +#[cfg(not(target_family = "wasm"))] +pub(super) fn set_socket_opts(socket: &socket2::Socket) -> std::io::Result<()> { + socket.set_nonblocking(true)?; + + /* See https://stackoverflow.com/a/14388707/6094756. + * On most BSD and Linux systems, we need both REUSEADDR and REUSEPORT; + * and if they don't support the latter we won't compile. + * On Windows, there is only REUSEADDR but it does what we want. 
+ */ + socket.set_reuse_address(true)?; + #[cfg(all(unix, not(any(target_os = "solaris", target_os = "illumos"))))] + { + socket.set_reuse_port(true)?; + } + #[cfg(not(any( + all(unix, not(any(target_os = "solaris", target_os = "illumos"))), + target_os = "windows" + )))] + { + compile_error!("Your system is not supported yet, please raise an error"); + } + + Ok(()) +} + +/** Perform a STUN query to get the external IP address */ +#[cfg(not(target_family = "wasm"))] +pub(super) async fn tcp_get_external_ip() -> Result<(SocketAddr, TcpStream), StunError> { + let mut socket = tcp_connect_custom( + &"[::]:0".parse::().unwrap().into(), + &super::PUBLIC_STUN_SERVER + .to_socket_addrs()? + /* If you find yourself behind a NAT66, open an issue */ + .find(|x| x.is_ipv4()) + /* TODO add a helper method to stdlib for this */ + .map(|addr| match addr { + SocketAddr::V4(v4) => { + SocketAddr::new(IpAddr::V6(v4.ip().to_ipv6_mapped()), v4.port()) + }, + SocketAddr::V6(_) => unreachable!(), + }) + .ok_or(StunError::ServerIsV6Only)? + .into(), + ) + .await?; + + use bytecodec::{DecodeExt, EncodeExt}; + use stun_codec::{ + rfc5389::{ + self, + attributes::{MappedAddress, Software, XorMappedAddress}, + Attribute, + }, + Message, MessageClass, MessageDecoder, MessageEncoder, TransactionId, + }; + + fn get_binding_request() -> Result, bytecodec::Error> { + use rand::Rng; + let random_bytes = rand::thread_rng().gen::<[u8; 12]>(); + + let mut message = Message::new( + MessageClass::Request, + rfc5389::methods::BINDING, + TransactionId::new(random_bytes), + ); + + message.add_attribute(Attribute::Software(Software::new( + "magic-wormhole-rust".to_owned(), + )?)); + + // Encodes the message + let mut encoder = MessageEncoder::new(); + let bytes = encoder.encode_into_bytes(message.clone())?; + Ok(bytes) + } + + fn decode_address(buf: &[u8]) -> Result, bytecodec::Error> { + let mut decoder = MessageDecoder::::new(); + let decoded = decoder.decode_from_bytes(buf)??; + + let external_addr1 = decoded + .get_attribute::() + .map(|x| x.address()); + //let external_addr2 = decoded.get_attribute::().map(|x|x.address()); + let external_addr3 = decoded + .get_attribute::() + .map(|x| x.address()); + let external_addr = external_addr1 + // .or(external_addr2) + .or(external_addr3); + + Ok(external_addr) + } + + /* Connect the plugs */ + + socket.write_all(get_binding_request()?.as_ref()).await?; + + let mut buf = [0u8; 256]; + /* Read header first */ + socket.read_exact(&mut buf[..20]).await?; + let len: u16 = u16::from_be_bytes([buf[2], buf[3]]); + /* Read the rest of the message */ + socket.read_exact(&mut buf[20..][..len as usize]).await?; + let external_addr = + decode_address(&buf[..20 + len as usize])?.ok_or(StunError::ServerNoResponse)?; + + Ok((external_addr, socket)) +} + +/** + * Bind to a port with SO_REUSEADDR, connect to the destination and then hide the blood behind a pretty [`async_std::net::TcpStream`] + * + * We want an `async_std::net::TcpStream`, but with SO_REUSEADDR set. + * The former is just a wrapper around `async_io::Async`, of which we + * copy the `connect` method to add a statement that will set the socket flag. + * See https://github.com/smol-rs/async-net/issues/20. 
+ */ +#[cfg(not(target_family = "wasm"))] +async fn tcp_connect_custom( + local_addr: &socket2::SockAddr, + dest_addr: &socket2::SockAddr, +) -> std::io::Result { + log::debug!("Binding to {}", local_addr.as_socket().unwrap()); + let socket = socket2::Socket::new(socket2::Domain::IPV6, socket2::Type::STREAM, None)?; + /* Set our custum options */ + set_socket_opts(&socket)?; + + socket.bind(local_addr)?; + + /* Initiate connect */ + match socket.connect(dest_addr) { + Ok(_) => {}, + #[cfg(unix)] + Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {}, + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {}, + Err(err) => return Err(err), + } + + let stream = async_io::Async::new(std::net::TcpStream::from(socket))?; + /* The stream becomes writable when connected. */ + stream.writable().await?; + + /* Check if there was an error while connecting. */ + stream + .get_ref() + .take_error() + .and_then(|maybe_err| maybe_err.map_or(Ok(()), Result::Err))?; + /* Convert our mess to `async_std::net::TcpStream */ + Ok(stream.into_inner()?.into()) +} + +#[cfg(not(target_family = "wasm"))] +pub(super) async fn connect_tcp_direct( + local_addr: Option>, + hint: DirectHint, +) -> Result { + let dest_addr = SocketAddr::try_from(&hint)?; + log::debug!("Connecting directly to {}", dest_addr); + let socket; + + if let Some(local_addr) = local_addr { + socket = tcp_connect_custom(&local_addr, &dest_addr.into()).await?; + log::debug!("Connected to {}!", dest_addr); + } else { + socket = async_std::net::TcpStream::connect(&dest_addr).await?; + log::debug!("Connected to {}!", dest_addr); + } + + wrap_tcp_connection(socket, ConnectionType::Direct) +} + +/* Take a relay hint and try to connect to it */ +#[cfg(not(target_family = "wasm"))] +pub(super) async fn connect_tcp_relay( + host: DirectHint, + name: Option, +) -> Result { + log::debug!("Connecting to relay {}", host); + let socket = TcpStream::connect((host.hostname.as_str(), host.port)) + .err_into::() + .await?; + log::debug!("Connected to {}!", host); + + wrap_tcp_connection(socket, ConnectionType::Relay { name }) +} + +#[cfg(target_family = "wasm")] +pub(super) async fn connect_ws_relay( + url: url::Url, + name: Option, +) -> Result { + log::debug!("Connecting to relay {}", url); + let (_meta, transit) = ws_stream_wasm::WsMeta::connect(&url, None) + .err_into::() + .await?; + log::debug!("Connected to {}!", url); + + let transit = Box::new(transit.into_io()) as Box; + + Ok(( + transit, + TransitInfo { + conn_type: ConnectionType::Relay { name }, + _unused: (), + }, + )) +} + +/* Take a tcp connection and transform it into a `TransitConnection` (mainly set timeouts) */ +#[cfg(not(target_family = "wasm"))] +pub(super) fn wrap_tcp_connection( + socket: TcpStream, + conn_type: ConnectionType, +) -> Result { + /* Set proper read and write timeouts. 
This will temporarily set the socket into blocking mode :/ */ + // https://github.com/async-rs/async-std/issues/499 + let socket = std::net::TcpStream::try_from(socket) + .expect("Internal error: this should not fail because we never cloned the socket"); + socket.set_write_timeout(Some(std::time::Duration::from_secs(120)))?; + socket.set_read_timeout(Some(std::time::Duration::from_secs(120)))?; + let socket: TcpStream = socket.into(); + + let info = TransitInfo { + conn_type, + peer_addr: socket + .peer_addr() + .expect("Internal error: socket must be IP"), + _unused: (), + }; + + Ok((Box::new(socket), info)) +} diff --git a/src/util.rs b/src/util.rs index 84438a27..6a172af2 100644 --- a/src/util.rs +++ b/src/util.rs @@ -1,5 +1,3 @@ -use futures::Future; - macro_rules! ensure { ($cond:expr, $err:expr $(,)?) => { if !$cond { @@ -79,6 +77,7 @@ impl std::fmt::Display for DisplayBytes<'_> { * TODO remove after https://github.com/quininer/memsec/issues/11 is resolved. * Original implementation: https://github.com/jedisct1/libsodium/blob/6d566070b48efd2fa099bbe9822914455150aba9/src/libsodium/sodium/utils.c#L262-L307 */ +#[allow(unused)] pub fn sodium_increment_le(n: &mut [u8]) { let mut c = 1u16; for b in n { @@ -171,41 +170,34 @@ pub fn hashcash(resource: String, bits: u32) -> String { } } } - -/// A weird mixture of [`futures::future::Abortable`], [`async_std::sync::Condvar`] and [`futures::future::Select`] tailored to our Ctrl+C handling. -/// -/// At it's core, it is an `Abortable` but instead of having an `AbortHandle`, we use a future that resolves as trigger. -/// Under the hood, it is implementing the same functionality as a `select`, but mapping one of the outcomes to an error type. -pub async fn cancellable( - future: impl Future + Unpin, - cancel: impl Future, -) -> Result { - use futures::future::Either; - futures::pin_mut!(cancel); - match futures::future::select(cancel, future).await { - Either::Left(((), _)) => Err(Cancelled), - Either::Right((val, _)) => Ok(val), - } +#[cfg(not(target_family = "wasm"))] +pub async fn sleep(duration: std::time::Duration) { + async_std::task::sleep(duration).await } -/** Like `cancellable`, but you'll get back the cancellation future in case the code terminates for future use */ -pub async fn cancellable_2 + Unpin>( - future: impl Future + Unpin, - cancel: C, -) -> Result<(T, C), Cancelled> { - use futures::future::Either; - match futures::future::select(cancel, future).await { - Either::Left(((), _)) => Err(Cancelled), - Either::Right((val, cancel)) => Ok((val, cancel)), - } +#[cfg(target_family = "wasm")] +pub async fn sleep(duration: std::time::Duration) { + /* Skip error handling. Waiting is best effort anyways */ + let _ = wasm_timer::Delay::new(duration).await; } -/// Indicator that the [`Cancellable`] task was cancelled. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct Cancelled; +#[cfg(not(target_family = "wasm"))] +pub async fn timeout( + duration: std::time::Duration, + future: F, +) -> Result +where + F: futures::Future, +{ + async_std::future::timeout(duration, future).await +} -impl std::fmt::Display for Cancelled { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Task has been cancelled") - } +#[cfg(target_family = "wasm")] +pub async fn timeout(duration: std::time::Duration, future: F) -> Result +where + F: futures::Future, +{ + use futures::FutureExt; + use wasm_timer::TryFutureExt; + future.map(Result::Ok).timeout(duration).await }
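
For readers skimming the `util.rs` hunk above: the point of `util::sleep` and `util::timeout` is that the transit code can share one call site across native and WASM builds. A minimal sketch of such a call site follows; the helper names and the platform split (async-std natively, wasm-timer on WASM) come from the diff, while the surrounding function and the `crate::util` path are illustrative assumptions, not code from this PR.

// Hypothetical call site inside the crate; both backends surface a timeout as
// `Err(_)`, so shared code only distinguishes "finished in time" from "did not".
async fn answer_with_deadline() -> Option<u32> {
    // Best-effort pause (async_std::task::sleep natively, wasm_timer::Delay on WASM).
    crate::util::sleep(std::time::Duration::from_millis(100)).await;

    crate::util::timeout(std::time::Duration::from_secs(60), async { 42u32 })
        .await
        .ok()
}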
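
The wire framing behind `read_transit_message`/`write_transit_message` in the new `TransitTransportRx`/`TransitTransportTx` traits is a 4-byte big-endian length prefix followed by exactly that many payload bytes. The standalone sketch below is illustrative only (it is not the crate's code); it exercises the same framing over an in-memory `futures::io::Cursor`.

use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Cursor};

async fn write_framed(tx: &mut (impl AsyncWrite + Unpin), msg: &[u8]) -> std::io::Result<()> {
    // 4-byte big-endian length prefix, then the payload.
    tx.write_all(&(msg.len() as u32).to_be_bytes()).await?;
    tx.write_all(msg).await
}

async fn read_framed(rx: &mut (impl AsyncRead + Unpin)) -> std::io::Result<Vec<u8>> {
    // Read the prefix first, then exactly that many bytes.
    let mut len = [0u8; 4];
    rx.read_exact(&mut len).await?;
    let mut buf = vec![0u8; u32::from_be_bytes(len) as usize];
    rx.read_exact(&mut buf).await?;
    Ok(buf)
}

fn main() -> std::io::Result<()> {
    futures::executor::block_on(async {
        let mut wire = Cursor::new(Vec::new());
        write_framed(&mut wire, b"hello").await?;
        wire.set_position(0);
        assert_eq!(read_framed(&mut wire).await?, b"hello");
        Ok(())
    })
}

The trait versions in `transport.rs` do the same thing, just reading the payload via `take(length).read_to_end(...)` into a pre-allocated buffer instead of a pre-zeroed one.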
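
On the API surface, the most visible change in this part of the diff is that `leader_connect` and `follower_connect` now return `(Transit, TransitInfo)` instead of `(Transit, TransitInfo, SocketAddr)`, with the connection type and (on non-WASM targets) the peer address carried inside `TransitInfo`. A caller-side sketch, assuming the `magic_wormhole::transit` re-exports and the public `conn_type`/`peer_addr` fields suggested by the diff; the helper itself is made up for illustration:

use magic_wormhole::transit::{ConnectionType, TransitInfo};

// Hypothetical helper; assumes a non-WASM build, where `peer_addr` is present.
fn describe_connection(info: &TransitInfo) -> String {
    if info.conn_type == ConnectionType::Direct {
        format!("direct connection to {}", info.peer_addr)
    } else {
        format!("relayed connection via {}", info.peer_addr)
    }
}

A caller would obtain the value as `let (transit, info) = connector.leader_connect(key, their_abilities, their_hints).await?;` and log `describe_connection(&info)` where it previously logged the separately returned `SocketAddr`.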