diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 2323d3dd9..13d1a9c2d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -25,10 +25,6 @@ jobs: run: cargo build --features test-vendored-openssl - name: Run tests run: cargo test --features test-vendored-openssl --verbose - - name: Run integration tests - run: | - cd integration - sh ./run.sh build-windows: strategy: @@ -65,4 +61,3 @@ jobs: verify-code-formatting: uses: ./.github/workflows/ci_format_code.yml - diff --git a/.gitignore b/.gitignore index 9cb61f483..aa0da6a6b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,7 @@ pki/ log/ /curl-ca-bundle.crt /samples/server.test.conf -/integration/pki-client -/integration/pki-server +**/pki-client +**/pki-server 3rd-party/open62541/build/ lib/pki* \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 83c80f758..b45d6584f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,211 +2,6 @@ # It is not intended for manual editing. version = 3 -[[package]] -name = "actix-codec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" -dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-sink", - "memchr", - "pin-project-lite", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-files" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0773d59061dedb49a8aed04c67291b9d8cf2fe0b60130a381aab53c6dd86e9be" -dependencies = [ - "actix-http", - "actix-service", - "actix-utils", - "actix-web", - "bitflags", - "bytes", - "derive_more", - "futures-core", - "http-range", - "log 0.4.22", - "mime", - "mime_guess", - "percent-encoding 2.3.1", - "pin-project-lite", - "v_htmlescape", -] - -[[package]] -name = "actix-http" -version = "3.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3ae682f693a9cd7b058f2b0b5d9a6d7728a8555779bedbbc35dd88528611d020" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "ahash", - "base64 0.22.1", - "bitflags", - "brotli", - "bytes", - "bytestring", - "derive_more", - "encoding_rs", - "flate2", - "futures-core", - "h2", - "http", - "httparse", - "httpdate", - "itoa", - "language-tags", - "local-channel", - "mime", - "percent-encoding 2.3.1", - "pin-project-lite", - "rand 0.8.5", - "sha1", - "smallvec", - "tokio", - "tokio-util", - "tracing", - "zstd", -] - -[[package]] -name = "actix-macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" -dependencies = [ - "quote", - "syn 2.0.71", -] - -[[package]] -name = "actix-router" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d324164c51f63867b57e73ba5936ea151b8a41a1d23d1031eeb9f70d0236f8" -dependencies = [ - "bytestring", - "cfg-if", - "http", - "regex", - "regex-lite", - "serde", - "tracing", -] - -[[package]] -name = "actix-rt" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" -dependencies = [ - "futures-core", - "tokio", -] - -[[package]] -name = "actix-server" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02303ce8d4e8be5b855af6cf3c3a08f3eff26880faad82bab679c22d3650cb5" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "futures-util", - "mio", - "socket2", - "tokio", - "tracing", -] - -[[package]] -name = "actix-service" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" -dependencies = [ - "futures-core", - "paste", - "pin-project-lite", -] - 
-[[package]] -name = "actix-utils" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" -dependencies = [ - "local-waker", - "pin-project-lite", -] - -[[package]] -name = "actix-web" -version = "4.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1988c02af8d2b718c05bc4aeb6a66395b7cdf32858c2c71131e5637a8c05a9ff" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "ahash", - "bytes", - "bytestring", - "cfg-if", - "cookie", - "derive_more", - "encoding_rs", - "futures-core", - "futures-util", - "itoa", - "language-tags", - "log 0.4.22", - "mime", - "once_cell", - "pin-project-lite", - "regex", - "regex-lite", - "serde", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2", - "time", - "url 2.5.2", -] - -[[package]] -name = "actix-web-codegen" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8" -dependencies = [ - "actix-router", - "proc-macro2", - "quote", - "syn 2.0.71", -] - [[package]] name = "addr2line" version = "0.22.0" @@ -229,7 +24,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -245,19 +39,10 @@ dependencies = [ ] [[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" +name = "allocator-api2" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" -dependencies = [ - "alloc-no-stdlib", -] +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -295,6 +80,28 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "async-server" +version = "0.1.0" +dependencies = [ + "chrono", + "ctrlc", + "log 0.4.22", + "opcua", + "tokio", +] + +[[package]] +name = "async-trait" +version = "0.1.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + [[package]] name = "autocfg" version = "1.3.0" @@ -322,48 +129,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - [[package]] name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "brotli" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = 
"brotli-decompressor" -version = "4.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bumpalo" version = "3.16.0" @@ -378,28 +149,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" - -[[package]] -name = "bytestring" -version = "1.3.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" -dependencies = [ - "bytes", -] +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.1.6" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" -dependencies = [ - "jobserver", - "libc", -] +checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" [[package]] name = "cfg-if" @@ -407,6 +165,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -422,23 +186,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = 
"cookie" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" -dependencies = [ - "percent-encoding 2.3.1", - "time", - "version_check", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -451,45 +198,18 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] -name = "cpufeatures" -version = "0.2.12" +name = "ctrlc" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ - "libc", -] - -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "deranged" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" -dependencies = [ - "powerfmt", + "nix", + "windows-sys 0.59.0", ] [[package]] @@ -511,20 +231,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", -] - -[[package]] -name = "derive_more" -version = "0.99.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" -dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "rustc_version", - "syn 2.0.71", + "syn 2.0.74", ] [[package]] @@ -533,25 +240,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", -] - -[[package]] -name = "encoding_rs" -version = "0.8.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" -dependencies = [ - "cfg-if", -] - [[package]] name = "env_logger" version = "0.10.2" @@ -571,16 +259,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" -[[package]] -name = "flate2" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "flume" version = "0.11.0" @@ -613,15 +291,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" -[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding 2.3.1", -] - [[package]] name = "fuchsia-cprng" version = "0.1.1" @@ -684,7 +353,7 @@ checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", ] [[package]] @@ -717,16 +386,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - [[package]] name = "gethostname" version = "0.4.3" @@ -765,30 +424,15 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] [[package]] name = "hermit-abi" @@ -796,35 +440,6 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-range" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21dec9db110f5f872ed9699c3ecf50cf16f423502706ba5c72462e28d3157573" - -[[package]] -name = "httparse" 
-version = "1.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - [[package]] name = "humantime" version = "2.1.0" @@ -865,21 +480,11 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "idna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "indexmap" -version = "2.2.6" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown", @@ -902,30 +507,15 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" -[[package]] -name = "jobserver" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" -dependencies = [ - "libc", -] - [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] -[[package]] -name = "language-tags" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" - [[package]] name = "lazy_static" version = "1.5.0" @@ -938,23 +528,6 @@ version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" -[[package]] -name = "local-channel" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6cbc85e69b8df4b8bb8b89ec634e7189099cea8927a276b7384ce5488e53ec8" -dependencies = [ - "futures-core", - "futures-sink", - "local-waker", -] - -[[package]] -name = "local-waker" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" - [[package]] name = "lock_api" version = "0.4.12" @@ -1029,22 +602,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "mime_guess" -version = "2.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "miniz_oxide" version = "0.7.4" @@ -1056,21 +613,27 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi", "libc", - "log 0.4.22", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] -name = 
"num-conv" -version = "0.1.0" +name = "nix" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags", + "cfg-if", + "cfg_aliases", + "libc", +] [[package]] name = "num-traits" @@ -1081,21 +644,11 @@ dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "object" -version = "0.36.1" +version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] @@ -1110,11 +663,10 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" name = "opcua" version = "0.13.0" dependencies = [ - "actix-files", - "actix-web", "arbitrary", "arc-swap", - "base64 0.21.7", + "async-trait", + "base64", "bitflags", "byteorder", "bytes", @@ -1124,6 +676,7 @@ dependencies = [ "foreign-types", "futures", "gethostname", + "hashbrown", "lazy_static", "libc", "log 0.4.22", @@ -1140,7 +693,7 @@ dependencies = [ "tempdir", "tokio", "tokio-util", - "url 1.7.2", + "url", "uuid", ] @@ -1157,6 +710,7 @@ name = "opcua-chess-server" version = "0.13.0" dependencies = [ "opcua", + "tokio", "uci", ] @@ -1172,6 +726,7 @@ dependencies = [ "pico-args", "rand 0.7.3", "tokio", + "tokio-util", ] [[package]] @@ -1192,16 +747,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "opcua-integration" -version = "0.13.0" -dependencies = [ - "chrono", - "log 0.4.22", - "opcua", - "tokio", -] - [[package]] name = "opcua-mqtt-client" version = 
"0.13.0" @@ -1228,13 +773,14 @@ dependencies = [ "chrono", "log 0.4.22", "opcua", + "tokio", ] [[package]] name = "openssl" -version = "0.10.65" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2823eb4c6453ed64055057ea8bd416eda38c71018723869dd043a3b1186115e" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags", "cfg-if", @@ -1253,7 +799,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", ] [[package]] @@ -1316,24 +862,12 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - [[package]] name = "percent-encoding" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" -[[package]] -name = "percent-encoding" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" - [[package]] name = "pico-args" version = "0.5.0" @@ -1358,17 +892,14 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" -[[package]] -name = "powerfmt" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" - [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = 
"77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "proc-macro2" @@ -1507,9 +1038,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -1528,12 +1059,6 @@ dependencies = [ "regex-syntax", ] -[[package]] -name = "regex-lite" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" - [[package]] name = "regex-syntax" version = "0.8.4" @@ -1588,15 +1113,6 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - [[package]] name = "rustls" version = "0.21.12" @@ -1627,7 +1143,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.7", + "base64", ] [[package]] @@ -1694,17 +1210,11 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" - [[package]] name = "serde" -version = "1.0.204" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" 
+checksum = "5665e14a49a4ea1b91029ba7d3bca9f299e1f7cfa194388ccc20f14743e784f2" dependencies = [ "serde_derive", ] @@ -1721,34 +1231,23 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.207" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "6aea2634c86b0e8ef2cfdc0c340baede54ec27b1e46febd7f80dffb2aa44a00e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", ] [[package]] name = "serde_json" -version = "1.0.120" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ - "form_urlencoded", "itoa", + "memchr", "ryu", "serde", ] @@ -1766,17 +1265,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "sha1" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -1833,9 +1321,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.71" +version = "2.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462" +checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" dependencies = [ "proc-macro2", "quote", @@ -1878,7 +1366,7 @@ checksum = 
"a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", ] [[package]] @@ -1891,37 +1379,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "time" -version = "0.3.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" -dependencies = [ - "deranged", - "itoa", - "num-conv", - "powerfmt", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" - -[[package]] -name = "time-macros" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" -dependencies = [ - "num-conv", - "time-core", -] - [[package]] name = "tinyvec" version = "1.8.0" @@ -1939,32 +1396,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.1" +version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df" +checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", ] [[package]] @@ 
-1990,26 +1446,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tracing" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" -dependencies = [ - "log 0.4.22", - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" -dependencies = [ - "once_cell", -] - [[package]] name = "typemap-ors" version = "1.0.0" @@ -2019,12 +1455,6 @@ dependencies = [ "unsafe-any-ors", ] -[[package]] -name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - [[package]] name = "uci" version = "0.1.4" @@ -2034,15 +1464,6 @@ dependencies = [ "log 0.3.9", ] -[[package]] -name = "unicase" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.15" @@ -2091,20 +1512,9 @@ version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" dependencies = [ - "idna 0.1.5", + "idna", "matches", - "percent-encoding 1.0.1", -] - -[[package]] -name = "url" -version = "2.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" -dependencies = [ - "form_urlencoded", - "idna 0.5.0", - "percent-encoding 2.3.1", + "percent-encoding", ] [[package]] @@ -2116,12 +1526,6 @@ dependencies = [ "getrandom 0.2.15", ] -[[package]] -name = "v_htmlescape" -version = "0.15.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e8257fbc510f0a46eb602c10215901938b5c2a7d5e70fc11483b1d3c9b5b18c" - [[package]] name = "vcpkg" version = "0.2.15" @@ -2130,9 +1534,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "wasi" @@ -2148,34 +1552,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log 0.4.22", "once_cell", "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2183,22 +1588,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", + "syn 2.0.74", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "winapi" @@ -2218,11 +1623,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2242,18 +1647,18 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "windows-sys" -version = "0.52.0" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets 0.52.6", ] @@ -2385,6 +1790,7 @@ version = "0.7.35" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] @@ -2396,33 +1802,5 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.71", -] - -[[package]] -name = "zstd" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa556e971e7b568dc775c136fc9de8c779b1c2fc3a63defaafadffdbd3181afa" -dependencies = [ - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "2.0.12+zstd.1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" -dependencies = [ - "cc", - "pkg-config", + "syn 2.0.74", ] diff --git a/Cargo.toml b/Cargo.toml index dc532e807..e549928c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,6 @@ panic = 'abort' resolver = "2" members = [ "lib", - "integration", "samples/demo-server", "samples/simple-client", "samples/mqtt-client", @@ -18,4 +17,4 @@ members = [ "samples/discovery-client", "samples/event-client", "tools/certificate-creator" -] +, "samples/async-server"] diff --git a/TODO.md b/TODO.md new file mode 100644 index 000000000..c87ea67ce --- /dev/null +++ b/TODO.md @@ -0,0 +1,33 @@ +# TODO + +This is a pending rewrite of the OPC-UA stack, following up on the rewrite of the client to be async all the way through. + +The following is a list of tasks, with progress indicated where relevant. + + - Rewrite the server to be async, and a great deal more flexible, making it possible to create _really_ advanced servers using this SDK. 
+ - **~100%** done with the initial scope, barring any bugs or details that need fixing. + - Some features are left out: + - Diagnostics, both as diagnosticsInfo from services, and general session diagnostics. There's a skeleton for this in the DiagnosticsNodeManager. + - Events are _implemented_ but incredibly cumbersome to write, so there is nothing fancy implemented for them. See below. + - Audit events are taken out due to the above. + - The web server is removed. Likely forever, a better solution is to use the `metrics` library to hook into the rust metrics ecosystem. + - A smattering of TODO's, most are somehow blocked by other tasks. + - Merge most recent PRs on the main repo, especially the one migrating away from OpenSSL. + - Split the library into parts again. + - Initially into types, client, core, and server. + - This is needed for other features. + - Write a codegen/macro library. Initially this should just replace all the JS codegen, later on it will do _more_. + - It would be best if this could be written in such a way that it can either be used as a core for a macro library, or as a standalone build.rs codegen module. + - Implement sophisticated event support, using a macro to create event types. + - Investigate decoding. There are several things that would be interesting to do here. + - Capture request-id/request-handle for error reporting during decoding. This will allow us to fatally fail much less often, but will require major changes to codegen. + - See if there is a way to avoid needing to pass the ID when decoding ExtensionObjects. This info should be available, either in the object itself or as part of the type being decoded. + - Flesh out the server and client SDK with tooling for ease if use. + - I had an idea of a "request builder" framework for the client SDK, which might be really useful. + - The server should be possible to set up in such a way that it is no harder to use than before. A specialized node manager would be ideal for this. 
+ - There are probably lots of neat logic we can add as utility methods that make it easier to implement node managers. + - Go through the standard and implement _more_ of the core stuff. Diagnostics, server management methods, etc. + - Implement a better framework for security checks. (?) + - Write a sophisticated server example with a persistent store. This would be a great way to verify the flexibility of the server. + - Write some "bad ideas" servers, it would be nice to showcase how flexible this is. + - Look into using non-send locks, to eliminate a source of deadlocks. diff --git a/docs/compatibility.md b/docs/compatibility.md index e184ca58e..ad97e6c39 100644 --- a/docs/compatibility.md +++ b/docs/compatibility.md @@ -42,8 +42,8 @@ The following services are supported in the server: * DeleteReferences * Query service set - * QueryFirst - stub that returns BadNotSupported - * QueryNext - stub that returns BadNotSupported + * QueryFirst - not implemented in any node manager, but the framework exists. + * QueryNext - not implemented in any node manager, but the framework exists. * View service set * Browse @@ -63,7 +63,7 @@ The following services are supported in the server: * CreateSubscription * ModifySubscription * DeleteSubscriptions - * TransferSubscriptions - stub implementation fails on any request + * TransferSubscriptions - Poorly tested * Publish * Republish * SetPublishingMode @@ -73,23 +73,20 @@ The following services are supported in the server: ### Address Space / Nodeset -The standard OPC UA address space is exposed. OPC UA for Rust uses a script to generate code to create and populate the standard address space. This functionality is controlled by a server build feature -`generated-address-space` that defaults to on but can be disabled if the full address space is not required. When disabled, the address space will be empty apart from some root objects. +The standard OPC UA address space is exposed through the `CoreNodeManager` implementation. 
OPC UA for Rust uses a script to generate code to create and populate the standard address space. This functionality is controlled by a server build feature `generated-address-space` that defaults to on but can be disabled if the full address space is not required. When disabled, the address space will be empty apart from some root objects. ### Current limitations Currently the following are not supported * Diagnostic info. OPC UA allows for you to ask for diagnostics with any request. None is supplied at this time -* Session resumption. If your client disconnects, all information is discarded. * Default node set is mostly static. Certain fields of server information will contain their default values unless explicitly set. -* Access control is limited to setting read/write permissions on nodes that apply to all sessions. * Multiple created sessions in a single transport. + * This should now technically be supported, but without any client that supports this it is not tested at all. ## Client -The client API API is synchronous - i.e. you call a function that makes a request and it returns -when the response is received or a timeout occurs. Under the surface it is asynchronous so that functionality may be exposed at some point. +The client API is asynchronous, but require you to "drive" the connection by polling an event loop. Convenience methods are provided for polling the event loop on a background thread. The client exposes functions that correspond to the current server supported profile, i.e. look above at the server services and there will be client-side functions that are analogous to those services. @@ -98,8 +95,7 @@ In addition to the server services above, the following are also supported. 
* FindServers - when connected to a discovery server, to find other servers * RegisterServer - when connected to a discovery server, to register a server -Potentially the client could have functions to call other services so it could be used to call other -OPC UA implementation. +The client is only automatically tested against the server implementation, so primarily only services supported by the current server implementation are supported. ## Configuration diff --git a/docs/design.md b/docs/design.md index ba96050c3..bde1eaa05 100644 --- a/docs/design.md +++ b/docs/design.md @@ -49,7 +49,7 @@ Here is a minimal, functioning server. ```rust extern crate opcua; -use opcua::server::prelude::*; +use opcua::types::*; fn main() { let server: Server = ServerBuilder::new_sample().server().unwrap(); diff --git a/docs/migration.md b/docs/migration.md index b8fa071fd..fd5171359 100644 --- a/docs/migration.md +++ b/docs/migration.md @@ -12,7 +12,7 @@ Any version breaking changes are described below. use opcua::sync::*; ``` -They haven't been added to the `opcua::client::prelude` or `opcua::server::prelude` in case +They haven't been added to the `opcua::client::prelude` or `opcua::types` in case your code uses `std::sync` types for other reasons that you need to resolve manually. ## Migrating from 0.9 and below diff --git a/docs/server.md b/docs/server.md index 9e4420b59..41ca8def4 100644 --- a/docs/server.md +++ b/docs/server.md @@ -36,14 +36,6 @@ To use the server crate we need to add a dependency to the `Cargo.toml`. opcua = { "0.12", features = ["server"] } ``` -## Import types - -Most of the things you need for the server are exposed with a single import that you can add to the top of your `main.rs`. - -```rust -use opcua::server::prelude::*; -``` - ## Create your server ### Configure the server @@ -58,31 +50,65 @@ The server can be configured in a number of ways: A `ServerBuilder` allows you to programmatically construct a `Server`. 
```rust -fn main() { - let server = ServerBuilder::new() +use std::sync::Arc; + +use opcua::server::address_space::Variable; +use opcua::server::node_manager::memory::{ + InMemoryNodeManager, NamespaceMetadata, SimpleNodeManager, SimpleNodeManagerImpl, +}; + +#[tokio::main] +async fn main() { + // First, create a simple node manager to contain any custom nodes we make. + // The namespace should be 2 here, since there are two default namespaces, making + // this the third. + let ns = 2; + let node_manager = Arc::new(SimpleNodeManager::new_simple( + NamespaceMetadata { + namespace_index: ns, + namespace_uri: "urn:my_server".to_owned(), + ..Default::default() + }, + "simple", + )); + + let (server, handle) = ServerBuilder::new() .application_name("Server Name") .application_uri("urn:server_uri") .discovery_urls(vec![endpoint_url(port_offset)]) .create_sample_keypair(true) .pki_dir("./pki-server") .discovery_server_url(None) - .host_and_port(hostname(), 1234) - .user_token(sample_user_id, ServerUserToken::new_user_pass("sample", "sample1")) - .endpoints( - [ - ("none", endpoint_path, SecurityPolicy::None, MessageSecurityMode::None, &user_token_ids), - ("basic128rsa15_sign", endpoint_path, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::Sign, &user_token_ids), - ("basic128rsa15_sign_encrypt", endpoint_path, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ("basic256_sign", endpoint_path, SecurityPolicy::Basic256, MessageSecurityMode::Sign, &user_token_ids), - ("basic256_sign_encrypt", endpoint_path, SecurityPolicy::Basic256, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ("basic256sha256_sign", endpoint_path, SecurityPolicy::Basic256Sha256, MessageSecurityMode::Sign, &user_token_ids), - ("basic256sha256_sign_encrypt", endpoint_path, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ].iter().map(|v| { - (v.0.to_string(), ServerEndpoint::from((v.1, v.2, v.3, &v.4[..]))) - 
}).collect()) - .server().unwrap(); - - //... + .host(hostname()) + .port(1234) + .add_user_token( + sample_user_id, + ServerUserToken::new_user_pass("sample", "sample1"), + ) + .add_endpoint( + "none", + ( + endpoint_path, + SecurityPolicy::None, + MessageSecurityMode::None, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic128rsa15_sign", + ( + endpoint_path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], + ), + ) + .build().unwrap(); + + // Add initial nodes here... + + // Run the server. + server.run().await.unwrap(); } ``` @@ -92,7 +118,23 @@ If you prefer to construct your server from a configuration that you read from a ```rust fn main() { - let mut server = Server::new(ServerConfig::load(&PathBuf::from("../server.conf")).unwrap()); + // The namespace should be 2 here, since there are two default namespaces, making + // this the third. + let ns = 2; + let node_manager = Arc::new(SimpleNodeManager::new_simple( + NamespaceMetadata { + namespace_index: ns, + namespace_uri: "urn:SimpleServer".to_owned(), + ..Default::default() + }, + "simple", + )); + + let (server, handle) = ServerBuilder::new() + .with_config_from("../server.conf") + .with_node_manager(node_manager.clone()) + .build() + .unwrap(); //... } ``` @@ -100,9 +142,10 @@ fn main() { Alternatively, let's say you use a configuration file, but how do you create it when one isn't there? Well your code logic could test if the file can load, and if it doesn't, could create the default one with a `ServerBuilder`. ```rust -fn main() { +#[tokio::main] +async fn main() { let server_config_path = "./myserver.conf"; - let server_config = if let Ok(server_config) = ServerConfig::load(&PathBuf::from(server_config_path))) { + let server_config = if let Ok(server_config) = ServerConfig::load(&PathBuf::from(server_config_path)) { server_config } else { @@ -145,22 +188,25 @@ To this you may wish to add your own objects and variables. 
To make this easy, y create new nodes with a builder, e.g: ```rust -fn main() { +#[tokio::main] +async fn main() { //... after server is set up - let address_space = server.address_space().write().unwrap(); + let address_space = node_manager.address_space(); + let address_space = address_space.write().unwrap(); // This is a convenience helper - let folder_id = address_space - .add_folder("Variables", "Variables", &NodeId::objects_folder_id()) - .unwrap(); + let folder_id = NodeId::new(2, "Variables"); + address_space.add_folder(&folder_id, "Variables", "Variables", &NodeId::objects_folder_id()); // Build a variable - let node_id = NodeId::new(2,, "MyVar"); + let node_id = NodeId::new(2, "MyVar"); VariableBuilder::new(&node_id, "MyVar", "MyVar") .organized_by(&folder_id) .value(0u8) .insert(&mut address_space); + // Make sure to not keep the address space locked, or nothing will be able to + // read from the server. //.... } ``` @@ -186,7 +232,9 @@ For some values you may prefer to set them once when they change. How you do thi let now = DateTime::now(); let value = 123.456f; let node_id = NodeId::new(2, "myvalue"); - let _ = address_space.set_variable_value(node_id, value, &now, &now); + // You can set the value directly on the address space, but prefer calling this method instead, + // which will notify any listening clients. + node_manager.set_value(&handle.subscriptions(), &node_id, None, DataValue::new_at(value, now)); ``` In this example `now` is the current timestamp for when the value changed and the value is 123.456. @@ -199,43 +247,35 @@ This example will `123.456f`. 
```rust let node_id = NodeId::new(2, "myvalue"); - if let Some(ref mut v) = address_space.find_variable_mut(node_id.clone()) { - let getter = AttrFnGetter::new( - move |_, _, _, _, _, _| -> Result, StatusCode> { - Ok(Some(DataValue::new_now(123.456f))) - }, - ); - v.set_value_getter(Arc::new(Mutex::new(getter))); - } + node_manager.inner().add_read_callback(node_id, |_, _, _| { + Ok(DataValue::new_now(123.456f)) + }) ``` The difference with the dynamic getter is there are parameters that allow your code to conditionally decide how they return a value. The parameters to the getter are: -* `&NodeId` -* `TimestampsToReturn` -* `AttributeId` * `NumericRange` -* `&QualifiedName` +* `TimestampsToReturn` +* `f64` - the max age parameter. This allows a getter to be broad or specific. In the example, the getter is so specific it does not require any of the parameters. ### Run the server -Running a server is a synchronous action: +Running a server is asynchronous. ```rust -fn main() { +#[tokio::main] +async fn main() { //... After server and address space are created - // Run the server. This does not ordinarily exit so you must Ctrl+C to terminate - server.run(); + // Run the server. This can be terminated gracefully by calling `handle.cancel()`. + server.run().await.unwrap(); } ``` -If you prefer to make it asynchronous, run it on a separate thread, or use `Server::run_server`. - ## Logging OPC UA for Rust provides an extensive amount of logging at error, warn, info, debug and trace levels. All this is via the standard [log](https://docs.rs/log/0.4.8/log/) facade so choose which logging implementation you want to capture information. See the link for implementations that you can use. 
diff --git a/integration/Cargo.toml b/integration/Cargo.toml deleted file mode 100644 index aa141455b..000000000 --- a/integration/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "opcua-integration" -version = "0.13.0" # OPCUARustVersion -authors = ["Adam Lock "] -edition = "2021" - -[dev-dependencies] -log = "0.4" -chrono = "0.4" -tokio = { version = "1", features = ["full"] } - -[dev-dependencies.opcua] -path = "../lib" -version = "0.13.0" # OPCUARustVersion -features = ["all"] diff --git a/integration/README.md b/integration/README.md deleted file mode 100644 index 5e84ea800..000000000 --- a/integration/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Integration tests are to test scenarios between the client and server. - -Since tests create and listen on ports, use pki folders, they must be run one at a time, like this: - -``` -cargo test --features integration -- --test-threads=1 -``` - -Or use the `run.sh` or `run-sanity.sh` script. - -The X509 token required for some tests is in `x509/` and generated like so: - -``` -openssl req -x509 -nodes -newkey rsa:4096 -keyout user_private_key.pem -outform der -out user_public_cert.der -days 10000 -``` \ No newline at end of file diff --git a/integration/run-sanity.sh b/integration/run-sanity.sh deleted file mode 100755 index 8061dd0ea..000000000 --- a/integration/run-sanity.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -export RUST_OPCUA_LOG=debug -cd $(git rev-parse --show-toplevel) -cargo test --features test-vendored-openssl -- --exact --test-threads=1 --ignored tests::connect_none diff --git a/integration/run.sh b/integration/run.sh deleted file mode 100755 index cabe6fc4a..000000000 --- a/integration/run.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -export RUST_OPCUA_LOG=debug -cd $(git rev-parse --show-toplevel) -cargo test --features test-vendored-openssl -- --test-threads=1 --ignored --exact $1 $2 $3 $4 diff --git a/integration/src/harness.rs b/integration/src/harness.rs deleted file mode 100644 index 
dd4bf1f6b..000000000 --- a/integration/src/harness.rs +++ /dev/null @@ -1,709 +0,0 @@ -use std::future::Future; -use std::time::{Duration, Instant}; -use std::{ - path::PathBuf, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; -use tokio::select; -use tokio::sync::mpsc; -use tokio::sync::mpsc::unbounded_channel; - -use log::*; - -use opcua::client::{Client, ClientBuilder, IdentityToken}; -use opcua::{ - runtime_components, - server::{ - builder::ServerBuilder, callbacks, config::ServerEndpoint, prelude::*, - session::SessionManager, - }, - sync::*, -}; - -use crate::*; - -const TEST_TIMEOUT: u64 = 30000; - -pub fn functions_object_id() -> NodeId { - NodeId::new(2, "Functions") -} - -pub fn hellox_method_id() -> NodeId { - NodeId::new(2, "HelloX") -} - -static NEXT_PORT_OFFSET: AtomicUsize = AtomicUsize::new(0); - -pub fn next_port() -> u16 { - port_from_offset(next_port_offset()) -} - -fn next_port_offset() -> u16 { - // hand out an incrementing port so tests can be run in parallel without interfering with each other - NEXT_PORT_OFFSET.fetch_add(1, Ordering::SeqCst) as u16 -} - -pub fn hostname() -> String { - // To avoid certificate trouble, use the computer's own name for the endpoint - let mut names = opcua::crypto::X509Data::computer_hostnames(); - if names.is_empty() { - "localhost".to_string() - } else { - names.remove(0) - } -} - -fn port_from_offset(port_offset: u16) -> u16 { - 4855u16 + port_offset -} - -pub fn endpoint_url(port: u16, path: &str) -> UAString { - // To avoid certificate trouble, use the computer's own name for tne endpoint - format!("opc.tcp://{}:{}{}", hostname(), port, path).into() -} - -fn v1_node_id() -> NodeId { - NodeId::new(2, "v1") -} - -pub fn stress_node_id(idx: usize) -> NodeId { - NodeId::new(2, format!("v{:04}", idx)) -} - -const USER_X509_CERTIFICATE_PATH: &str = "./x509/user_cert.der"; -const USER_X509_PRIVATE_KEY_PATH: &str = "./x509/user_private_key.pem"; - -pub fn server_user_token() -> ServerUserToken { - 
ServerUserToken::user_pass("sample1", "sample1pwd") -} - -pub fn server_x509_token() -> ServerUserToken { - ServerUserToken::x509("x509", &PathBuf::from(USER_X509_CERTIFICATE_PATH)) -} - -pub fn client_x509_token() -> IdentityToken { - IdentityToken::X509( - PathBuf::from(USER_X509_CERTIFICATE_PATH), - PathBuf::from(USER_X509_PRIVATE_KEY_PATH), - ) -} - -pub fn client_user_token() -> IdentityToken { - IdentityToken::UserName(CLIENT_USERPASS_ID.into(), "sample1pwd".into()) -} - -pub fn client_invalid_user_token() -> IdentityToken { - IdentityToken::UserName(CLIENT_USERPASS_ID.into(), "xxxx".into()) -} - -pub fn new_server(port: u16) -> Server { - let endpoint_path = "/"; - - // Both client and server define this - let sample_user_id = CLIENT_USERPASS_ID; - let x509_user_id = CLIENT_X509_ID; - - // Create user tokens - anonymous and a sample user - let user_token_ids = vec![ - opcua::server::prelude::ANONYMOUS_USER_TOKEN_ID, - sample_user_id, - x509_user_id, - ]; - - // Create an OPC UA server with sample configuration and default node set - let server = ServerBuilder::new() - .application_name("integration_server") - .application_uri("urn:integration_server") - .discovery_urls(vec![endpoint_url(port, endpoint_path).to_string()]) - .create_sample_keypair(true) - .pki_dir(format!("./pki-server/{}", port)) - .discovery_server_url(None) - .host_and_port(hostname(), port) - .user_token(sample_user_id, server_user_token()) - .user_token(x509_user_id, server_x509_token()) - .endpoints( - [ - ( - "none", - endpoint_path, - SecurityPolicy::None, - MessageSecurityMode::None, - &user_token_ids, - ), - ( - "basic128rsa15_sign", - endpoint_path, - SecurityPolicy::Basic128Rsa15, - MessageSecurityMode::Sign, - &user_token_ids, - ), - ( - "basic128rsa15_sign_encrypt", - endpoint_path, - SecurityPolicy::Basic128Rsa15, - MessageSecurityMode::SignAndEncrypt, - &user_token_ids, - ), - ( - "basic256_sign", - endpoint_path, - SecurityPolicy::Basic256, - MessageSecurityMode::Sign, - 
&user_token_ids, - ), - ( - "basic256_sign_encrypt", - endpoint_path, - SecurityPolicy::Basic256, - MessageSecurityMode::SignAndEncrypt, - &user_token_ids, - ), - ( - "basic256sha256_sign", - endpoint_path, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::Sign, - &user_token_ids, - ), - ( - "basic256sha256_sign_encrypt", - endpoint_path, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - &user_token_ids, - ), - ( - "endpoint_aes128sha256rsaoaep_sign", - endpoint_path, - SecurityPolicy::Aes128Sha256RsaOaep, - MessageSecurityMode::Sign, - &user_token_ids, - ), - ( - "endpoint_aes128sha256rsaoaep_sign_encrypt", - endpoint_path, - SecurityPolicy::Aes128Sha256RsaOaep, - MessageSecurityMode::SignAndEncrypt, - &user_token_ids, - ), - ( - "endpoint_aes256sha256rsapss_sign", - endpoint_path, - SecurityPolicy::Aes256Sha256RsaPss, - MessageSecurityMode::Sign, - &user_token_ids, - ), - ( - "endpoint_aes256sha256rsapss_sign_encrypt", - endpoint_path, - SecurityPolicy::Aes256Sha256RsaPss, - MessageSecurityMode::SignAndEncrypt, - &user_token_ids, - ), - ] - .iter() - .map(|v| { - ( - v.0.to_string(), - ServerEndpoint::from((v.1, v.2, v.3, &v.4[..])), - ) - }) - .collect(), - ) - .server() - .unwrap(); - - // Allow untrusted access to the server - { - let certificate_store = server.certificate_store(); - let mut certificate_store = certificate_store.write(); - certificate_store.set_trust_unknown_certs(true); - } - - { - let address_space = server.address_space(); - let mut address_space = address_space.write(); - - // Populate the address space with some variables - let v1_node = v1_node_id(); - - // Create a sample folder under objects folder - let sample_folder_id = address_space - .add_folder("Sample", "Sample", &NodeId::objects_folder_id()) - .unwrap(); - - // Add variables - let _ = address_space.add_variables( - vec![Variable::new(&v1_node, "v1", "v1", 0 as i32)], - &sample_folder_id, - ); - - // Register a getter for the variable - if let 
Some(ref mut v) = address_space.find_variable_mut(v1_node.clone()) { - let getter = AttrFnGetter::new( - move |_, _, _, _, _, _| -> Result, StatusCode> { - Ok(Some(DataValue::new_now(100))) - }, - ); - v.set_value_getter(Arc::new(Mutex::new(getter))); - } - - // Add a bunch of sequential vars too, similar to demo-server - let node_ids = (0..1000) - .map(|i| stress_node_id(i)) - .collect::>(); - let folder_id = address_space - .add_folder("Stress", "Stress", &NodeId::objects_folder_id()) - .unwrap(); - - node_ids.iter().enumerate().for_each(|(i, node_id)| { - let name = format!("stress node v{:04}", i); - VariableBuilder::new(&node_id, &name, &name) - .data_type(DataTypeId::Int32) - .value(0i32) - .writable() - .organized_by(&folder_id) - .insert(&mut address_space); - }); - - let functions_object_id = functions_object_id(); - ObjectBuilder::new(&functions_object_id, "Functions", "Functions") - .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) - .organized_by(ObjectId::ObjectsFolder) - .insert(&mut address_space); - - MethodBuilder::new(&hellox_method_id(), "HelloX", "HelloX") - .component_of(functions_object_id) - .input_args( - &mut address_space, - &[("YourName", DataTypeId::String).into()], - ) - .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) - .callback(Box::new(HelloX)) - .insert(&mut address_space); - } - - server -} - -struct HelloX; - -impl callbacks::Method for HelloX { - fn call( - &mut self, - _session_id: &NodeId, - _session_map: Arc>, - request: &CallMethodRequest, - ) -> Result { - debug!("HelloX method called"); - // Validate input to be a string - let mut out1 = Variant::Empty; - let in1_status = if let Some(ref input_arguments) = request.input_arguments { - if let Some(in1) = input_arguments.get(0) { - if let Variant::String(in1) = in1 { - out1 = Variant::from(format!("Hello {}!", &in1)); - StatusCode::Good - } else { - StatusCode::BadTypeMismatch - } - } else if input_arguments.len() == 0 { - return 
Err(StatusCode::BadArgumentsMissing); - } else { - // Shouldn't get here because there is 1 argument - return Err(StatusCode::BadTooManyArguments); - } - } else { - return Err(StatusCode::BadArgumentsMissing); - }; - - let status_code = if in1_status.is_good() { - StatusCode::Good - } else { - StatusCode::BadInvalidArgument - }; - - Ok(CallMethodResult { - status_code, - input_argument_results: Some(vec![in1_status]), - input_argument_diagnostic_infos: None, - output_arguments: Some(vec![out1]), - }) - } -} - -fn new_client(port: u16, quick_timeout: bool) -> Client { - let builder = ClientBuilder::new() - .application_name("integration_client") - .application_uri("x") - .pki_dir(format!("./pki-client/{port}")) - .create_sample_keypair(true) - .trust_server_certs(true) - .session_retry_initial(Duration::from_millis(200)); - - let builder = if quick_timeout { - builder.session_retry_limit(1) - } else { - builder - }; - builder.client().unwrap() -} - -pub fn new_client_server(port: u16, quick_timeout: bool) -> (Client, Server) { - (new_client(port, quick_timeout), new_server(port)) -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ClientCommand { - Start, - Quit, -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ClientResponse { - Starting, - Ready, - Finished(bool), -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ServerCommand { - Quit, -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ServerResponse { - Starting, - Ready, - Finished(bool), -} - -pub async fn perform_test( - client: Client, - server: Server, - client_test: Option, - server_test: ST, -) where - CT: FnOnce(mpsc::UnboundedReceiver, Client) -> CFut + Send + 'static, - ST: FnOnce(mpsc::UnboundedReceiver, Server) -> SFut + Send + 'static, - CFut: Future + Send + 'static, - SFut: Future + Send + 'static, -{ - opcua::console_logging::init(); - - // Spawn the CLIENT future - let (client_fut, tx_client_command, mut rx_client_response) = { - println!("Begin test"); - // Create 
channels for client command and response - let (tx_client_command, mut rx_client_command) = unbounded_channel::(); - let (tx_client_response, rx_client_response) = unbounded_channel::(); - - let client_fut = tokio::task::spawn(async move { - println!("Enter client fut"); - let result = if let Some(client_test) = client_test { - // Wait for start command so we know server is ready - println!("Begin wait for client RX"); - let msg = rx_client_command.recv().await.unwrap(); - - assert_eq!(msg, ClientCommand::Start); - // Client is ready - let _ = tx_client_response.send(ClientResponse::Ready); - - // Client test will run - trace!("Running client test"); - - let _ = tx_client_response.send(ClientResponse::Starting); - - println!("Begin client test"); - client_test(rx_client_command, client).await; - true - } else { - trace!("No client test"); - true - }; - let _ = tx_client_response.send(ClientResponse::Finished(result)); - }); - (client_fut, tx_client_command, rx_client_response) - }; - - // Spawn the SERVER future - let (server_fut, tx_server_command, mut rx_server_response) = { - // Create channels for server command and response - let (tx_server_command, rx_server_command) = unbounded_channel(); - let (tx_server_response, rx_server_response) = unbounded_channel(); - println!("Make server fut"); - let server_fut = tokio::task::spawn(async move { - println!("Begin server"); - // Server future - info!("Server test thread is running"); - let _ = tx_server_response.send(ServerResponse::Starting); - let _ = tx_server_response.send(ServerResponse::Ready); - - println!("Begin server test"); - server_test(rx_server_command, server).await; - - let result = true; - info!( - "Server test has completed, sending ServerResponse::Finished({:?})", - result - ); - let _ = tx_server_response.send(ServerResponse::Finished(result)); - info!("Server thread has finished"); - }); - (server_fut, tx_server_command, rx_server_response) - }; - - let start_time = Instant::now(); - - let 
timeout = TEST_TIMEOUT; - - let mut client_has_finished = false; - let mut client_success = false; - let mut server_has_finished = false; - let mut server_success = false; - - let end_time = start_time + std::time::Duration::from_millis(timeout); - - // Loop until either the client or the server has quit, or the timeout limit is reached - while !client_has_finished || !server_has_finished { - select! { - _ = tokio::time::sleep_until(end_time.into()) => { - let _ = tx_client_command.send(ClientCommand::Quit); - let _ = tx_server_command.send(ServerCommand::Quit); - - error!("Test timed out after {} ms", timeout); - error!("Running components:\n {}", { - let components = runtime_components!(); - components - .iter() - .cloned() - .collect::>() - .join("\n ") - }); - - server_success = false; - client_success = false; - - break; - } - response = rx_client_response.recv() => { - match response { - Some(ClientResponse::Starting) => { - info!("Client test is starting"); - } - Some(ClientResponse::Ready) => { - info!("Client is ready"); - } - Some(ClientResponse::Finished(success)) => { - info!("Client test finished, result = {:?}", success); - client_success = success; - client_has_finished = true; - if !server_has_finished { - info!("Telling the server to quit"); - let _ = tx_server_command.send(ServerCommand::Quit); - } - } - None => { - } - } - } - response = rx_server_response.recv() => { - match response { - Some(ServerResponse::Starting) => { - info!("Server test is starting"); - } - Some(ServerResponse::Ready) => { - info!("Server test is ready"); - // Tell the client to start - let _ = tx_client_command.send(ClientCommand::Start); - } - Some(ServerResponse::Finished(success)) => { - info!("Server test finished, result = {:?}", success); - server_success = success; - server_has_finished = true; - } - None => { - } - } - } - } - } - - info!("Joining on threads...."); - - // Threads should exit by now - let _ = client_fut.await.unwrap(); - let _ = 
server_fut.await.unwrap(); - - assert!(client_success); - assert!(server_success); - - info!("test complete") -} - -pub async fn get_endpoints_client_test( - server_url: &str, - _identity_token: IdentityToken, - _rx_client_command: mpsc::UnboundedReceiver, - client: Client, -) { - let endpoints = client - .get_server_endpoints_from_url(server_url) - .await - .unwrap(); - // Value should match number of expected endpoints - assert_eq!(endpoints.len(), 11); -} - -pub async fn regular_client_test( - client_endpoint: impl Into, - identity_token: IdentityToken, - _rx_client_command: mpsc::UnboundedReceiver, - mut client: Client, -) { - // Connect to the server - let client_endpoint = client_endpoint.into(); - info!( - "Client will try to connect to endpoint {:?}", - client_endpoint - ); - let (session, event_loop) = client - .new_session_from_endpoint(client_endpoint, identity_token) - .await - .unwrap(); - - let handle = event_loop.spawn(); - session.wait_for_connection().await; - - // Read the variable - let mut values = { - let read_nodes = vec![ReadValueId::from(v1_node_id())]; - session - .read(&read_nodes, TimestampsToReturn::Both, 1.0) - .await - .unwrap() - }; - assert_eq!(values.len(), 1); - - let value = values.remove(0).value; - assert_eq!(value, Some(Variant::from(100))); - - session.disconnect().await.unwrap(); - handle.await.unwrap(); -} - -pub async fn invalid_token_test( - client_endpoint: impl Into, - identity_token: IdentityToken, - _rx_client_command: mpsc::UnboundedReceiver, - mut client: Client, -) { - // Connect to the server - let client_endpoint = client_endpoint.into(); - info!( - "Client will try to connect to endpoint {:?}", - client_endpoint - ); - let (_, event_loop) = client - .new_session_from_endpoint(client_endpoint, identity_token) - .await - .unwrap(); - let res = event_loop.spawn().await.unwrap(); - assert_eq!(res, StatusCode::BadUserAccessDenied); -} - -pub async fn regular_server_test( - mut rx_server_command: 
mpsc::UnboundedReceiver, - server: Server, -) { - trace!("Hello from server"); - // Wrap the server - a little juggling is required to give one rc - // to a thread while holding onto one. - let server = Arc::new(RwLock::new(server)); - let server2 = server.clone(); - - let server_fut = Server::new_server_task(server); - tokio::pin!(server_fut); - - // Listen for quit command, if we get one then finish - loop { - select! { - command = rx_server_command.recv() => { - match command { - Some(ServerCommand::Quit) | None => { - // Tell the server to quit - { - info!("1. ------------------------ Server test received quit"); - let mut server = server2.write(); - server.abort(); - } - // wait for server thread to quit - let _ = server_fut.await; - info!("2. ------------------------ Server has now terminated after quit"); - break; - } - } - } - _ = &mut server_fut => { - warn!("Server finished unexpectedly"); - break; - } - } - } -} - -pub async fn connect_with_client_test(port: u16, client_test: CT, quick_timeout: bool) -where - CT: FnOnce(mpsc::UnboundedReceiver, Client) -> Fut + Send + 'static, - Fut: Future + Send + 'static, -{ - let (client, server) = new_client_server(port, quick_timeout); - perform_test(client, server, Some(client_test), regular_server_test).await; -} - -pub async fn connect_with_get_endpoints(port: u16) { - connect_with_client_test( - port, - move |rx_client_command: mpsc::UnboundedReceiver, client: Client| async move { - get_endpoints_client_test( - &endpoint_url(port, "/").as_ref(), - IdentityToken::Anonymous, - rx_client_command, - client, - ) - .await; - }, - false - ).await; -} - -pub async fn connect_with_invalid_token( - port: u16, - client_endpoint: EndpointDescription, - identity_token: IdentityToken, -) { - connect_with_client_test( - port, - move |rx_client_command: mpsc::UnboundedReceiver, client: Client| async move { - invalid_token_test(client_endpoint, identity_token, rx_client_command, client).await; - }, - true - ) - .await; -} - 
-pub async fn connect_with( - port: u16, - client_endpoint: EndpointDescription, - identity_token: IdentityToken, -) { - connect_with_client_test( - port, - move |rx_client_command: mpsc::UnboundedReceiver, client: Client| async move { - regular_client_test(client_endpoint, identity_token, rx_client_command, client).await; - }, - false - ).await; -} diff --git a/integration/src/main.rs b/integration/src/main.rs deleted file mode 100644 index 058128373..000000000 --- a/integration/src/main.rs +++ /dev/null @@ -1,12 +0,0 @@ -fn main() { - eprintln!(r#"Needs to be run with "cargo test -- --test-threads=1 --ignored""#); -} - -pub const CLIENT_USERPASS_ID: &str = "sample1"; -pub const CLIENT_X509_ID: &str = "x509"; - -#[cfg(test)] -mod tests; - -#[cfg(test)] -mod harness; diff --git a/integration/src/tests.rs b/integration/src/tests.rs deleted file mode 100644 index 43b4a057f..000000000 --- a/integration/src/tests.rs +++ /dev/null @@ -1,597 +0,0 @@ -use std::{sync::Arc, thread}; - -use chrono::Utc; -use log::*; - -use opcua::client::{Client, DataChangeCallback, IdentityToken}; -use opcua::server::prelude::*; -use opcua::sync::*; - -use tokio::sync::mpsc::{self, unbounded_channel}; - -use crate::harness::*; - -fn endpoint( - port: u16, - path: &str, - _security_policy: SecurityPolicy, - _message_security_mode: MessageSecurityMode, -) -> EndpointDescription { - let mut endpoint = - EndpointDescription::from(("", SecurityPolicy::None.to_str(), MessageSecurityMode::None)); - endpoint.endpoint_url = endpoint_url(port, path); - endpoint -} - -fn endpoint_none(port: u16) -> EndpointDescription { - endpoint(port, "/", SecurityPolicy::None, MessageSecurityMode::None) -} - -fn endpoint_basic128rsa15_sign(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Basic128Rsa15, - MessageSecurityMode::Sign, - ) -} - -fn endpoint_basic128rsa15_sign_encrypt(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Basic128Rsa15, - 
MessageSecurityMode::SignAndEncrypt, - ) -} - -fn endpoint_basic256_sign(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Basic256, - MessageSecurityMode::Sign, - ) -} - -fn endpoint_basic256_sign_encrypt(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Basic256, - MessageSecurityMode::SignAndEncrypt, - ) -} - -fn endpoint_basic256sha256_sign(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::Sign, - ) -} - -fn endpoint_basic256sha256_sign_encrypt(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - ) -} - -fn endpoint_aes128sha256rsaoaep_sign(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Aes128Sha256RsaOaep, - MessageSecurityMode::Sign, - ) -} - -fn endpoint_aes128sha256rsaoaep_sign_encrypt(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Aes128Sha256RsaOaep, - MessageSecurityMode::SignAndEncrypt, - ) -} - -fn endpoint_aes256sha256rsapss_sign(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Aes256Sha256RsaPss, - MessageSecurityMode::Sign, - ) -} - -fn endpoint_aes256sha256rsapss_sign_encrypt(port: u16) -> EndpointDescription { - endpoint( - port, - "/", - SecurityPolicy::Aes256Sha256RsaPss, - MessageSecurityMode::SignAndEncrypt, - ) -} - -/// This is the most basic integration test starting the server on a thread, setting an abort flag -/// and expecting the test to complete before it times out. 
-#[test] -fn server_abort() { - opcua::console_logging::init(); - - let server = Arc::new(RwLock::new(new_server(0))); - let server2 = server.clone(); - - // This is pretty lame, but to tell if the thread has terminated or not, there is no try_join - // so we will have the thread send a message when it is finishing via a receiver - - let (tx, mut rx) = unbounded_channel(); - let _t = thread::spawn(move || { - // This should run & block until it is told to abort - Server::run_server(server); - tx.send(()).unwrap(); - }); - - { - // Set the abort flag - server2.write().abort(); - } - - // Wait for the message or timeout to occur - let timeout = 10000; - let start_time = Utc::now(); - loop { - if let Ok(_) = rx.try_recv() { - info!("Abort test succeeded"); - break; - } - let now = Utc::now(); - let elapsed = now.signed_duration_since(start_time.clone()); - if elapsed.num_milliseconds() > timeout { - panic!( - "Abort test timed out after {} ms", - elapsed.num_milliseconds() - ); - } - } -} - -/// Start a server, send a HELLO message but then wait for the server -/// to timeout and drop the connection. -#[tokio::test] -async fn hello_timeout() { - use tokio::io::AsyncReadExt; - use tokio::net::TcpStream; - - let port = next_port(); - // For this test we want to set the hello timeout to a low value for the sake of speed. - - // The server will be a normal server, the client will just open the socket and keep the - // socket open for longer than the timeout period. The server is expected to close the socket for the - // test to pass. 
- - let client_test = move |_rx_client_command: mpsc::UnboundedReceiver, - _client: Client| async move { - // Client will open a socket, and sit there waiting for the socket to close, which should happen in under the timeout_wait_duration - let timeout_wait_duration = std::time::Duration::from_secs(2); - - let host = crate::harness::hostname(); - let address = (host.as_ref(), port); - let mut c = 0; - // Getting a connection can sometimes take a few tries, since the server reports it is - // ready before it actually is in some cases. - let mut stream = loop { - let stream = TcpStream::connect(address).await; - if let Ok(stream) = stream { - break stream; - } - c += 1; - if c >= 10 { - panic!("Failed to connect to server"); - } - - tokio::time::sleep(std::time::Duration::from_millis(200)).await; - }; - debug!("Client is going to connect to port {:?}", address); - - let mut buf = [0u8]; - - // Spin around for the timeout to finish and then try using the socket to see if it is still open. - let start = std::time::Instant::now(); - loop { - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - let now = std::time::Instant::now(); - if now - start > timeout_wait_duration { - debug!("Timeout wait duration has passed, so trying to read from the socket"); - let result = stream.read(&mut buf).await; - match result { - Ok(v) => { - if v > 0 { - panic!( - "Hello timeout exceeded and socket is still open, result = {}", - v - ) - } else { - // From - debug!("Client got a read of 0 bytes on the socket, so treating by terminating with success"); - break; - } - } - Err(err) => { - debug!( - "Client got an error {:?} on the socket terminating successfully", - err - ); - break; - } - } - } - } - }; - - let (client, server) = new_client_server(port, false); - perform_test(client, server, Some(client_test), regular_server_test).await; -} - -/// Start a server, fetch a list of endpoints, verify they are correct -#[tokio::test] -async fn get_endpoints() { - println!("Enter 
test"); - // Connect to server and get a list of endpoints - connect_with_get_endpoints(next_port()).await; -} - -/// Connect to the server using no encryption, anonymous -#[tokio::test] -async fn connect_none() { - // Connect a session using None security policy and anonymous token. - let port = next_port(); - connect_with(port, endpoint_none(port), IdentityToken::Anonymous).await; -} - -/// Connect to the server using Basic128Rsa15 + Sign -#[tokio::test] -async fn connect_basic128rsa15_sign() { - // Connect a session with Basic128Rsa and Sign - let port = next_port(); - connect_with( - port, - endpoint_basic128rsa15_sign(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Basic128Rsa15 + SignEncrypt -#[tokio::test] -async fn connect_basic128rsa15_sign_and_encrypt() { - // Connect a session with Basic128Rsa and SignAndEncrypt - let port = next_port(); - connect_with( - port, - endpoint_basic128rsa15_sign_encrypt(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Basic256 + Sign -#[tokio::test] -async fn connect_basic256_sign() { - // Connect a session with Basic256 and Sign - let port = next_port(); - connect_with(port, endpoint_basic256_sign(port), IdentityToken::Anonymous).await; -} - -/// Connect to the server using Basic256 + SignEncrypt -#[tokio::test] -async fn connect_basic256_sign_and_encrypt() { - // Connect a session with Basic256 and SignAndEncrypt - let port = next_port(); - connect_with( - port, - endpoint_basic256_sign_encrypt(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Basic256Sha256 + Sign -#[tokio::test] -async fn connect_basic256sha256_sign() { - // Connect a session with Basic256Sha256 and Sign - let port = next_port(); - connect_with( - port, - endpoint_basic256sha256_sign(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Basic256Sha256 + SignEncrypt -#[tokio::test] -async fn 
connect_basic256sha256_sign_and_encrypt() { - let port = next_port(); - connect_with( - port, - endpoint_basic256sha256_sign_encrypt(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Aes128Sha256RsaOaep + Sign -#[tokio::test] -async fn connect_aes128sha256rsaoaep_sign() { - let port = next_port(); - connect_with( - port, - endpoint_aes128sha256rsaoaep_sign(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Aes128Sha256RsaOaep + SignEncrypt -#[tokio::test] -async fn connect_aes128sha256rsaoaep_sign_encrypt() { - let port = next_port(); - connect_with( - port, - endpoint_aes128sha256rsaoaep_sign_encrypt(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Aes128Sha256RsaOaep + Sign -#[tokio::test] -async fn connect_aes256sha256rsapss_sign() { - let port = next_port(); - connect_with( - port, - endpoint_aes256sha256rsapss_sign(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server using Aes128Sha256RsaOaep + SignEncrypt -#[tokio::test] -async fn connect_aes256sha256rsapss_sign_encrypt() { - let port = next_port(); - connect_with( - port, - endpoint_aes256sha256rsapss_sign_encrypt(port), - IdentityToken::Anonymous, - ) - .await; -} - -/// Connect to the server user/pass -#[tokio::test] -async fn connect_basic128rsa15_with_username_password() { - // Connect a session using username/password token - let port = next_port(); - connect_with( - port, - endpoint_basic128rsa15_sign_encrypt(port), - client_user_token(), - ) - .await; -} - -/// Connect a session using an invalid username/password token and expect it to fail -#[tokio::test] -async fn connect_basic128rsa15_with_invalid_username_password() { - let port = next_port(); - connect_with_invalid_token( - port, - endpoint_basic128rsa15_sign_encrypt(port), - client_invalid_user_token(), - ) - .await; -} - -/// Connect a session using an X509 key and certificate -#[tokio::test] -async fn 
connect_basic128rsa15_with_x509_token() { - let port = next_port(); - connect_with( - port, - endpoint_basic128rsa15_sign_encrypt(port), - client_x509_token(), - ) - .await; -} - -/// Connect to a server, read a variable, write a value to the variable, read the variable to verify it changed -#[tokio::test] -async fn read_write_read() { - let port = next_port(); - let client_endpoint = endpoint_basic128rsa15_sign_encrypt(port); - let identity_token = client_x509_token(); - connect_with_client_test( - port, - move |_rx_client_command: mpsc::UnboundedReceiver, mut client: Client| async move { - info!( - "Client will try to connect to endpoint {:?}", - client_endpoint - ); - let (session, event_loop) = client - .new_session_from_endpoint(client_endpoint, identity_token) - .await - .unwrap(); - - let handle = event_loop.spawn(); - session.wait_for_connection().await; - - let node_id = stress_node_id(1); - - // Read the existing value - let results = session - .read(&[node_id.clone().into()], TimestampsToReturn::Both, 1.0) - .await - .unwrap(); - let value = &results[0]; - debug!("value = {:?}", value); - assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(0)); - - let results = session - .write(&[WriteValue { - node_id: node_id.clone(), - attribute_id: AttributeId::Value as u32, - index_range: UAString::null(), - value: Variant::Int32(1).into(), - }]) - .await - .unwrap(); - let value = results[0]; - assert_eq!(value, StatusCode::Good); - - let results = session - .read(&[node_id.into()], TimestampsToReturn::Both, 1.0) - .await - .unwrap(); - let value = &results[0]; - assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(1)); - - session.disconnect().await.unwrap(); - handle.await.unwrap(); - }, - false - ).await; -} - -/// Connect with the server and attempt to subscribe and monitor 1000 variables -#[tokio::test] -async fn subscribe_1000() { - let port = next_port(); - let client_endpoint = endpoint_basic128rsa15_sign_encrypt(port); - let identity_token = 
client_x509_token(); - - connect_with_client_test( - port, - move |_rx_client_command: mpsc::UnboundedReceiver, mut client: Client| async move { - info!( - "Client will try to connect to endpoint {:?}", - client_endpoint - ); - let (session, event_loop) = client - .new_session_from_endpoint(client_endpoint, identity_token) - .await - .unwrap(); - - let handle = event_loop.spawn(); - session.wait_for_connection().await; - - let start_time = Utc::now(); - - // Create subscription - let subscription_id = session - .create_subscription( - std::time::Duration::from_secs(2), - 100, - 100, - 0, - 0, - true, - DataChangeCallback::new(|_, _| { - panic!("This shouldn't be called"); - }), - ) - .await - .unwrap(); - - // NOTE: There is a default limit of 1000 items in arrays, so this list will go from 1 to 1000 inclusive - - // Create monitored items - the last one does not exist so expect that to fail - let items_to_create = (0..1000) - .map(|i| i + 1) // From v0001 to v1000 - .map(|i| (i, stress_node_id(i))) - .map(|(i, node_id)| MonitoredItemCreateRequest { - item_to_monitor: node_id.into(), - monitoring_mode: MonitoringMode::Reporting, - requested_parameters: MonitoringParameters { - client_handle: i as u32, - sampling_interval: 1000.0f64, - filter: ExtensionObject::null(), - queue_size: 1, - discard_oldest: true, - }, - }) - .collect::>(); - - let elapsed = Utc::now() - start_time; - assert!(elapsed.num_milliseconds() < 500i64); - error!("Elapsed time = {}ms", elapsed.num_milliseconds()); - - let results = session - .create_monitored_items(subscription_id, TimestampsToReturn::Both, items_to_create) - .await - .unwrap(); - results.iter().enumerate().for_each(|(i, result)| { - if i == 999 { - // Last idx var does not exist so expect it to fail - error!("Checkout {}", result.status_code); - assert!(result.status_code.is_bad()); - } else { - assert!(result.status_code.is_good()); - } - }); - - session.disconnect().await.unwrap(); - handle.await.unwrap(); - }, - false - 
).await; -} - -#[tokio::test] -async fn method_call() { - // Call a method on the server, one exercising some parameters in and out - let port = next_port(); - let client_endpoint = endpoint_none(port); - - connect_with_client_test( - port, - move |_rx_client_command: mpsc::UnboundedReceiver, mut client: Client| async move { - info!( - "Client will try to connect to endpoint {:?}", - client_endpoint - ); - let (session, event_loop) = client - .new_session_from_endpoint(client_endpoint, IdentityToken::Anonymous) - .await - .unwrap(); - - let handle = event_loop.spawn(); - session.wait_for_connection().await; - - // Call the method - let input_arguments = Some(vec![Variant::from("Foo")]); - let method = CallMethodRequest { - object_id: functions_object_id(), - method_id: hellox_method_id(), - input_arguments, - }; - let result = session.call(method).await.unwrap(); - - // Result should say "Hello Foo" - assert!(result.status_code.is_good()); - let output_args = result.output_arguments.unwrap(); - assert_eq!(output_args.len(), 1); - let msg = output_args.get(0).unwrap(); - assert_eq!(msg.to_string(), "Hello Foo!"); - - session.disconnect().await.unwrap(); - handle.await.unwrap(); - }, - false - ).await; -} diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f157bca91..9a53e10ee 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -21,7 +21,7 @@ appveyor = { repository = "locka99/opcua" } [features] default = ["server", "client"] -all = ["server", "client", "console-logging", "http"] +all = ["server", "client", "console-logging"] # This is for CI/CD testing on platforms with unresolved OpenSSL deps, don't use otherwise. 
test-vendored-openssl = ["all", "vendored-openssl"] # Server default settings @@ -38,8 +38,6 @@ generated-address-space = [] discovery-server-registration = ["client"] # OpenSSL can be compiled and statically linked to with this feature vendored-openssl = ["openssl/vendored"] -# Servers might want to show a web server with metric / diagnostic info -http = ["actix-files", "actix-web"] [dependencies] log = "0.4" @@ -70,10 +68,10 @@ foreign-types = "0.3" # dependencies below are only required when certain features are enabled rumqttc = { version = "0.23", optional = true } env_logger = { version = "0.10", optional = true } -actix-web = { version = "4.4", optional = true } -actix-files = { version = "0.6", optional = true } arbitrary = { version = "1", optional = true, features = ["derive"] } arc-swap = "1.6.0" +async-trait = "0.1.79" +hashbrown = "0.14.5" [dev-dependencies] tempdir = "0.3" diff --git a/lib/src/client/builder.rs b/lib/src/client/builder.rs index e802f15c1..9fa56db22 100644 --- a/lib/src/client/builder.rs +++ b/lib/src/client/builder.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, time::Duration}; -use crate::server::prelude::Config; +use crate::core::config::Config; use super::{Client, ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}; diff --git a/lib/src/client/config.rs b/lib/src/client/config.rs index 33dccc03f..e8b91fd32 100644 --- a/lib/src/client/config.rs +++ b/lib/src/client/config.rs @@ -356,7 +356,7 @@ impl ClientConfig { session_retry_max: Duration::from_secs(30), keep_alive_interval: Duration::from_secs(10), request_timeout: Duration::from_secs(60), - min_publish_interval: Duration::from_secs(1), + min_publish_interval: Duration::from_millis(100), publish_timeout: Duration::from_secs(60), max_inflight_publish: 2, session_timeout: 0, diff --git a/lib/src/client/mod.rs b/lib/src/client/mod.rs index 517f84de7..36fa8f914 100644 --- a/lib/src/client/mod.rs +++ b/lib/src/client/mod.rs @@ -121,9 +121,9 @@ use std::path::PathBuf; 
pub use builder::ClientBuilder; pub use config::{ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}; pub use session::{ - Client, DataChangeCallback, EventCallback, MonitoredItem, OnSubscriptionNotification, Session, - SessionActivity, SessionConnectMode, SessionEventLoop, SessionPollResult, Subscription, - SubscriptionCallbacks, + Client, DataChangeCallback, EventCallback, HistoryReadAction, HistoryUpdateAction, + MonitoredItem, OnSubscriptionNotification, Session, SessionActivity, SessionConnectMode, + SessionEventLoop, SessionPollResult, Subscription, SubscriptionCallbacks, }; pub use transport::AsyncSecureChannel; diff --git a/lib/src/client/retry.rs b/lib/src/client/retry.rs index ebb9fbe77..52e5d7479 100644 --- a/lib/src/client/retry.rs +++ b/lib/src/client/retry.rs @@ -1,5 +1,6 @@ use std::time::Duration; +#[derive(Debug, Clone)] pub(crate) struct ExponentialBackoff { max_sleep: Duration, max_retries: Option, diff --git a/lib/src/client/session/client.rs b/lib/src/client/session/client.rs index fd11b279f..032b32d84 100644 --- a/lib/src/client/session/client.rs +++ b/lib/src/client/session/client.rs @@ -22,7 +22,7 @@ use crate::{ types::{ ApplicationDescription, DecodingOptions, EndpointDescription, FindServersRequest, GetEndpointsRequest, MessageSecurityMode, RegisterServerRequest, RegisteredServer, - StatusCode, + StatusCode, UAString, }, }; @@ -458,12 +458,14 @@ impl Client { &self, endpoint: &EndpointDescription, channel: &AsyncSecureChannel, + locale_ids: Option>, + profile_uris: Option>, ) -> Result, StatusCode> { let request = GetEndpointsRequest { request_header: channel.make_request_header(self.config.request_timeout), endpoint_url: endpoint.endpoint_url.clone(), - locale_ids: None, - profile_uris: None, + locale_ids, + profile_uris, }; // Send the message and wait for a response. 
let response = channel.send(request, self.config.request_timeout).await?; @@ -491,47 +493,80 @@ impl Client { pub async fn get_server_endpoints_from_url( &self, server_url: impl Into, + ) -> Result, StatusCode> { + self.get_endpoints(server_url, &[], &[]).await + } + + /// Get the list of endpoints for the server at the given URL. + /// + /// # Arguments + /// + /// * `server_url` - URL of the discovery server to get endpoints from. + /// * `locale_ids` - List of required locale IDs on the given server endpoint. + /// * `profile_uris` - Returned endpoints should match one of these profile URIs. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of the available endpoints on the server. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + pub async fn get_endpoints( + &self, + server_url: impl Into, + locale_ids: &[&str], + profile_uris: &[&str], ) -> Result, StatusCode> { let server_url = server_url.into(); if !is_opc_ua_binary_url(&server_url) { - Err(StatusCode::BadTcpEndpointUrlInvalid) - } else { - let preferred_locales = Vec::new(); - // Most of these fields mean nothing when getting endpoints - let endpoint = EndpointDescription::from(server_url.as_ref()); - let session_info = SessionInfo { - endpoint: endpoint.clone(), - user_identity_token: IdentityToken::Anonymous, - preferred_locales, - }; - let channel = self.channel_from_session_info(session_info); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + let preferred_locales = Vec::new(); + // Most of these fields mean nothing when getting endpoints + let endpoint = EndpointDescription::from(server_url.as_ref()); + let session_info = SessionInfo { + endpoint: endpoint.clone(), + user_identity_token: IdentityToken::Anonymous, + preferred_locales, + }; + let channel = self.channel_from_session_info(session_info); - let mut evt_loop = channel.connect().await?; + let mut evt_loop = channel.connect().await?; - let send_fut = 
self.get_server_endpoints_inner(&endpoint, &channel); - pin!(send_fut); + let send_fut = self.get_server_endpoints_inner( + &endpoint, + &channel, + if locale_ids.is_empty() { + None + } else { + Some(locale_ids.iter().map(|i| (*i).into()).collect()) + }, + if profile_uris.is_empty() { + None + } else { + Some(profile_uris.iter().map(|i| (*i).into()).collect()) + }, + ); + pin!(send_fut); - let res = loop { - select! { - r = evt_loop.poll() => { - if let TransportPollResult::Closed(e) = r { - return Err(e); - } - }, - res = &mut send_fut => break res - } - }; + let res = loop { + select! { + r = evt_loop.poll() => { + if let TransportPollResult::Closed(e) = r { + return Err(e); + } + }, + res = &mut send_fut => break res + } + }; - channel.close_channel().await; + channel.close_channel().await; - loop { - if matches!(evt_loop.poll().await, TransportPollResult::Closed(_)) { - break; - } + loop { + if matches!(evt_loop.poll().await, TransportPollResult::Closed(_)) { + break; } - - res } + + res } async fn find_servers_inner( @@ -572,7 +607,7 @@ impl Client { /// * `Ok(Vec)` - List of descriptions for servers known to the discovery server. /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
pub async fn find_servers( - &mut self, + &self, discovery_endpoint_url: impl Into, ) -> Result, StatusCode> { let discovery_endpoint_url = discovery_endpoint_url.into(); diff --git a/lib/src/client/session/mod.rs b/lib/src/client/session/mod.rs index d539c7ad7..bfab143e2 100644 --- a/lib/src/client/session/mod.rs +++ b/lib/src/client/session/mod.rs @@ -39,6 +39,7 @@ impl From<(EndpointDescription, IdentityToken)> for SessionInfo { pub use client::Client; pub use connect::SessionConnectMode; pub use event_loop::{SessionActivity, SessionEventLoop, SessionPollResult}; +pub use services::attributes::{HistoryReadAction, HistoryUpdateAction}; pub use services::subscriptions::{ DataChangeCallback, EventCallback, MonitoredItem, OnSubscriptionNotification, Subscription, SubscriptionCallbacks, diff --git a/lib/src/client/session/services/attributes.rs b/lib/src/client/session/services/attributes.rs index 42a1688f7..80b877b9e 100644 --- a/lib/src/client/session/services/attributes.rs +++ b/lib/src/client/session/services/attributes.rs @@ -24,20 +24,20 @@ pub enum HistoryReadAction { ReadAtTimeDetails(ReadAtTimeDetails), } -impl From for ExtensionObject { - fn from(action: HistoryReadAction) -> Self { +impl From<&HistoryReadAction> for ExtensionObject { + fn from(action: &HistoryReadAction) -> Self { match action { HistoryReadAction::ReadEventDetails(v) => { - Self::from_encodable(ObjectId::ReadEventDetails_Encoding_DefaultBinary, &v) + Self::from_encodable(ObjectId::ReadEventDetails_Encoding_DefaultBinary, v) } HistoryReadAction::ReadRawModifiedDetails(v) => { - Self::from_encodable(ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, &v) + Self::from_encodable(ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, v) } HistoryReadAction::ReadProcessedDetails(v) => { - Self::from_encodable(ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, &v) + Self::from_encodable(ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, v) } HistoryReadAction::ReadAtTimeDetails(v) 
=> { - Self::from_encodable(ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, &v) + Self::from_encodable(ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, v) } } } @@ -157,7 +157,7 @@ impl Session { /// pub async fn history_read( &self, - history_read_details: HistoryReadAction, + history_read_details: &HistoryReadAction, timestamps_to_return: TimestampsToReturn, release_continuation_points: bool, nodes_to_read: &[HistoryReadValueId], diff --git a/lib/src/client/session/services/method.rs b/lib/src/client/session/services/method.rs index c9f13d328..6858b40b1 100644 --- a/lib/src/client/session/services/method.rs +++ b/lib/src/client/session/services/method.rs @@ -17,37 +17,40 @@ impl Session { /// /// # Arguments /// - /// * `method` - The method to call. Note this function takes anything that can be turned into - /// a [`CallMethodRequest`] which includes a ([`NodeId`], [`NodeId`], `Option>`) tuple - /// which refers to the object id, method id, and input arguments respectively. + /// * `methods` - The method to call. /// /// # Returns /// - /// * `Ok(CallMethodResult)` - A [`CallMethodResult`] for the Method call. + /// * `Ok(Vec)` - A [`CallMethodResult`] for the Method call. /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
/// pub async fn call( &self, - method: impl Into, - ) -> Result { + methods: Vec, + ) -> Result, StatusCode> { + if methods.is_empty() { + session_error!(self, "call(), was not supplied with any methods to call"); + return Err(StatusCode::BadNothingToDo); + } + session_debug!(self, "call()"); - let methods_to_call = Some(vec![method.into()]); + let cnt = methods.len(); let request = CallRequest { request_header: self.make_request_header(), - methods_to_call, + methods_to_call: Some(methods), }; let response = self.send(request).await?; if let SupportedMessage::CallResponse(response) = response { - if let Some(mut results) = response.results { - if results.len() != 1 { + if let Some(results) = response.results { + if results.len() != cnt { session_error!( self, - "call(), expecting a result from the call to the server, got {} results", + "call(), expecting {cnt} results from the call to the server, got {} results", results.len() ); Err(StatusCode::BadUnexpectedError) } else { - Ok(results.remove(0)) + Ok(results) } } else { session_error!( @@ -61,6 +64,33 @@ impl Session { } } + /// Calls a single method on an object on the server by sending a [`CallRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.11.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `method` - The method to call. Note this function takes anything that can be turned into + /// a [`CallMethodRequest`] which includes a ([`NodeId`], [`NodeId`], `Option>`) tuple + /// which refers to the object id, method id, and input arguments respectively. + /// + /// # Returns + /// + /// * `Ok(CallMethodResult)` - A [`CallMethodResult`] for the Method call. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn call_one( + &self, + method: impl Into, + ) -> Result { + Ok(self + .call(vec![method.into()]) + .await? 
+ .into_iter() + .next() + .unwrap()) + } + /// Calls GetMonitoredItems via call_method(), putting a sane interface on the input / output. /// /// # Arguments @@ -80,7 +110,7 @@ impl Session { let object_id: NodeId = ObjectId::Server.into(); let method_id: NodeId = MethodId::Server_GetMonitoredItems.into(); let request: CallMethodRequest = (object_id, method_id, args).into(); - let response = self.call(request).await?; + let response = self.call_one(request).await?; if let Some(mut result) = response.output_arguments { if result.len() == 2 { let server_handles = >::try_from(&result.remove(0)) diff --git a/lib/src/client/session/services/subscriptions/event_loop.rs b/lib/src/client/session/services/subscriptions/event_loop.rs index 96f039bfa..eef72d51c 100644 --- a/lib/src/client/session/services/subscriptions/event_loop.rs +++ b/lib/src/client/session/services/subscriptions/event_loop.rs @@ -93,7 +93,6 @@ impl SubscriptionEventLoop { // Both internal ticks and external triggers result in publish requests. v = recv.wait_for(|i| i > &slf.last_external_trigger) => { if let Ok(v) = v { - debug!("Sending publish due to external trigger"); // On an external trigger, we always publish. futures.push(slf.static_publish()); next = slf.session.next_publish_time(true); @@ -103,7 +102,6 @@ impl SubscriptionEventLoop { _ = next_tick_fut => { // Avoid publishing if there are too many inflight publish requests. if futures.len() < slf.max_inflight_publish { - debug!("Sending publish due to internal tick"); futures.push(slf.static_publish()); } next = slf.session.next_publish_time(true); diff --git a/lib/src/client/session/services/subscriptions/mod.rs b/lib/src/client/session/services/subscriptions/mod.rs index 24ff936d0..c2a5486fb 100644 --- a/lib/src/client/session/services/subscriptions/mod.rs +++ b/lib/src/client/session/services/subscriptions/mod.rs @@ -35,13 +35,16 @@ pub(crate) struct ModifyMonitoredItem { /// for a simple collection of closures. 
pub trait OnSubscriptionNotification: Send + Sync { /// Called when a subscription changes state on the server. - fn on_subscription_status_change(&mut self, _notification: StatusChangeNotification) {} + #[allow(unused)] + fn on_subscription_status_change(&mut self, notification: StatusChangeNotification) {} /// Called for each data value change. - fn on_data_value(&mut self, _notification: DataValue, _item: &MonitoredItem) {} + #[allow(unused)] + fn on_data_value(&mut self, notification: DataValue, item: &MonitoredItem) {} /// Called for each received event. - fn on_event(&mut self, _event_fields: Option>, _item: &MonitoredItem) {} + #[allow(unused)] + fn on_event(&mut self, event_fields: Option>, item: &MonitoredItem) {} } /// A convenient wrapper around a set of callback functions that implements [OnSubscriptionNotification] diff --git a/lib/src/client/session/services/subscriptions/service.rs b/lib/src/client/session/services/subscriptions/service.rs index 6115c38b2..144008ea6 100644 --- a/lib/src/client/session/services/subscriptions/service.rs +++ b/lib/src/client/session/services/subscriptions/service.rs @@ -17,9 +17,10 @@ use crate::{ CreateMonitoredItemsRequest, CreateSubscriptionRequest, DeleteMonitoredItemsRequest, DeleteSubscriptionsRequest, ModifyMonitoredItemsRequest, ModifySubscriptionRequest, MonitoredItemCreateRequest, MonitoredItemCreateResult, MonitoredItemModifyRequest, - MonitoredItemModifyResult, MonitoringMode, MonitoringParameters, PublishRequest, - SetMonitoringModeRequest, SetPublishingModeRequest, SetTriggeringRequest, StatusCode, - TimestampsToReturn, TransferResult, TransferSubscriptionsRequest, + MonitoredItemModifyResult, MonitoringMode, MonitoringParameters, NotificationMessage, + PublishRequest, RepublishRequest, SetMonitoringModeRequest, SetPublishingModeRequest, + SetTriggeringRequest, StatusCode, TimestampsToReturn, TransferResult, + TransferSubscriptionsRequest, }, }; @@ -38,7 +39,7 @@ impl Session { ) -> Result { let request = 
CreateSubscriptionRequest { request_header: self.make_request_header(), - requested_publishing_interval: publishing_interval.as_secs_f64(), + requested_publishing_interval: publishing_interval.as_millis() as f64, requested_lifetime_count: lifetime_count, requested_max_keep_alive_count: max_keep_alive_count, max_notifications_per_publish, @@ -189,7 +190,7 @@ impl Session { pub async fn modify_subscription( &self, subscription_id: u32, - publishing_interval: f64, + publishing_interval: Duration, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, @@ -205,7 +206,7 @@ impl Session { let request = ModifySubscriptionRequest { request_header: self.make_request_header(), subscription_id, - requested_publishing_interval: publishing_interval, + requested_publishing_interval: publishing_interval.as_millis() as f64, requested_lifetime_count: lifetime_count, requested_max_keep_alive_count: max_keep_alive_count, max_notifications_per_publish, @@ -282,7 +283,7 @@ impl Session { subscription_state.set_publishing_mode(subscription_ids, publishing_enabled); } session_debug!(self, "set_publishing_mode success"); - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "set_publishing_mode failed {:?}", response); Err(process_unexpected_response(response)) @@ -295,6 +296,9 @@ impl Session { /// It may also be used by one Client to take over a Subscription from another Client by /// transferring the Subscription to its Session. /// + /// NOTE: This method is incomplete, currently if you call this manually there is no way + /// to register a listener for the new subscription. + /// /// See OPC UA Part 4 - Services 5.13.7 for complete description of the service and error responses. /// /// * `subscription_ids` - one or more subscription identifiers. 
@@ -317,7 +321,7 @@ impl Session { // No subscriptions session_error!( self, - "set_publishing_mode, no subscription ids were provided" + "transfer_subscriptions, no subscription ids were provided" ); Err(StatusCode::BadNothingToDo) } else { @@ -327,10 +331,12 @@ impl Session { send_initial_values, }; let response = self.send(request).await?; + // TODO: Create a method where a user can register a subscription without creating it on the server + // somehow. That's necessary if this method is going to be useable manually. if let SupportedMessage::TransferSubscriptionsResponse(response) = response { process_service_result(&response.response_header)?; session_debug!(self, "transfer_subscriptions success"); - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "transfer_subscriptions failed {:?}", response); Err(process_unexpected_response(response)) @@ -366,7 +372,7 @@ impl Session { ); Err(StatusCode::BadInvalidArgument) } else { - let result = self.delete_subscriptions(&[subscription_id][..]).await?; + let result = self.delete_subscriptions(&[subscription_id]).await?; Ok(result[0]) } } @@ -411,7 +417,7 @@ impl Session { }); } session_debug!(self, "delete_subscriptions success"); - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "delete_subscriptions failed {:?}", response); Err(process_unexpected_response(response)) @@ -517,7 +523,7 @@ impl Session { "create_monitored_items, success but no monitored items were created" ); } - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "create_monitored_items failed {:?}", response); Err(process_unexpected_response(response)) @@ -601,7 +607,7 @@ impl Session { } } session_debug!(self, "modify_monitored_items, success"); - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "modify_monitored_items failed {:?}", response); 
Err(process_unexpected_response(response)) @@ -655,7 +661,7 @@ impl Session { ); } if let SupportedMessage::SetMonitoringModeResponse(response) = response { - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "set_monitoring_mode failed {:?}", response); Err(process_unexpected_response(response)) @@ -785,7 +791,7 @@ impl Session { subscription_state.delete_monitored_items(subscription_id, items_to_delete); } session_debug!(self, "delete_monitored_items, success"); - Ok(response.results.unwrap()) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "delete_monitored_items failed {:?}", response); Err(process_unexpected_response(response)) @@ -868,6 +874,47 @@ impl Session { Err(err_status) } + /// Send a request to re-publish an unacknowledged notification message from the server. + /// + /// If this succeeds, the session will automatically acknowledge the notification in the next publish request. + /// + /// # Arguments + /// + /// * `subscription_id` - The Server-assigned identifier for the Subscription to republish from. + /// * `sequence_number` - Sequence number to re-publish. + /// + /// # Returns + /// + /// * `Ok(NotificationMessage)` - Re-published notification message. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn republish( + &self, + subscription_id: u32, + sequence_number: u32, + ) -> Result { + let request = RepublishRequest { + request_header: self.channel.make_request_header(self.request_timeout), + subscription_id, + retransmit_sequence_number: sequence_number, + }; + + let response = self.channel.send(request, self.request_timeout).await?; + + if let SupportedMessage::RepublishResponse(response) = response { + process_service_result(&response.response_header)?; + session_debug!(self, "republish, success"); + { + let mut lck = trace_lock!(self.subscription_state); + lck.add_acknowledgement(subscription_id, sequence_number); + } + Ok(response.notification_message) + } else { + session_error!(self, "republish failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + /// This code attempts to take the existing subscriptions created by a previous session and /// either transfer them to this session, or construct them from scratch. pub(crate) async fn transfer_subscriptions_from_old_session(&self) { diff --git a/lib/src/client/session/services/subscriptions/state.rs b/lib/src/client/session/services/subscriptions/state.rs index cf916ade0..4108338f3 100644 --- a/lib/src/client/session/services/subscriptions/state.rs +++ b/lib/src/client/session/services/subscriptions/state.rs @@ -60,7 +60,7 @@ impl SubscriptionState { std::mem::take(&mut self.acknowledgements) } - fn add_acknowledgement(&mut self, subscription_id: u32, sequence_number: u32) { + pub(crate) fn add_acknowledgement(&mut self, subscription_id: u32, sequence_number: u32) { self.acknowledgements.push(SubscriptionAcknowledgement { subscription_id, sequence_number, @@ -90,6 +90,19 @@ impl SubscriptionState { self.subscriptions.get(&subscription_id) } + /// Get the number of subscriptions. + pub fn len(&self) -> usize { + self.subscriptions.len() + } + + /// Get the number of subscriptions that have publishing enabled. 
+ pub fn len_active(&self) -> usize { + self.subscriptions + .iter() + .filter(|s| s.1.publishing_enabled) + .count() + } + pub(crate) fn add_subscription(&mut self, subscription: Subscription) { self.subscriptions .insert(subscription.subscription_id(), subscription); diff --git a/lib/src/client/session/services/view.rs b/lib/src/client/session/services/view.rs index 6aed0c987..8fa1d0ee5 100644 --- a/lib/src/client/session/services/view.rs +++ b/lib/src/client/session/services/view.rs @@ -24,33 +24,35 @@ impl Session { /// /// # Returns /// - /// * `Ok(Option)` - A list [`BrowseResult`] corresponding to each node to browse. A browse result + /// * `Ok(Vec)` - A list [`BrowseResult`] corresponding to each node to browse. A browse result /// may contain a continuation point, for use with `browse_next()`. /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. /// pub async fn browse( &self, nodes_to_browse: &[BrowseDescription], - ) -> Result>, StatusCode> { + max_references_per_node: u32, + view: Option, + ) -> Result, StatusCode> { if nodes_to_browse.is_empty() { session_error!(self, "browse, was not supplied with any nodes to browse"); Err(StatusCode::BadNothingToDo) } else { let request = BrowseRequest { request_header: self.make_request_header(), - view: ViewDescription { + view: view.unwrap_or_else(|| ViewDescription { view_id: NodeId::null(), timestamp: DateTime::null(), view_version: 0, - }, - requested_max_references_per_node: 1000, + }), + requested_max_references_per_node: max_references_per_node, nodes_to_browse: Some(nodes_to_browse.to_vec()), }; let response = self.send(request).await?; if let SupportedMessage::BrowseResponse(response) = response { session_debug!(self, "browse, success"); process_service_result(&response.response_header)?; - Ok(response.results) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "browse failed {:?}", response); Err(process_unexpected_response(response)) @@ 
-78,7 +80,7 @@ impl Session { &self, release_continuation_points: bool, continuation_points: &[ByteString], - ) -> Result>, StatusCode> { + ) -> Result, StatusCode> { if continuation_points.is_empty() { Err(StatusCode::BadNothingToDo) } else { @@ -91,7 +93,7 @@ impl Session { if let SupportedMessage::BrowseNextResponse(response) = response { session_debug!(self, "browse_next, success"); process_service_result(&response.response_header)?; - Ok(response.results) + Ok(response.results.unwrap_or_default()) } else { session_error!(self, "browse_next failed {:?}", response); Err(process_unexpected_response(response)) diff --git a/lib/src/client/session/session.rs b/lib/src/client/session/session.rs index 9402ac5aa..368ff5df6 100644 --- a/lib/src/client/session/session.rs +++ b/lib/src/client/session/session.rs @@ -57,6 +57,7 @@ pub struct Session { pub subscription_state: Mutex, pub(super) monitored_item_handle: AtomicHandle, pub(super) trigger_publish_tx: tokio::sync::watch::Sender, + decoding_options: DecodingOptions, } impl Session { @@ -79,7 +80,7 @@ impl Session { certificate_store.clone(), session_info.clone(), session_retry_policy.clone(), - decoding_options, + decoding_options.clone(), config.performance.ignore_clock_skew, auth_token.clone(), TransportConfiguration { @@ -108,6 +109,7 @@ impl Session { subscription_state: Mutex::new(SubscriptionState::new(config.min_publish_interval)), monitored_item_handle: AtomicHandle::new(1000), trigger_publish_tx, + decoding_options, }); ( @@ -169,6 +171,12 @@ impl Session { self.internal_session_id.load(Ordering::Relaxed) } + /// Get the current session ID. This is different from `session_id`, which is the client-side ID + /// to keep track of multiple sessions. This is the session ID the server uses to identify this session. + pub fn server_session_id(&self) -> NodeId { + (**(*self.session_id).load()).clone() + } + /// Convenience method to wait for a connection to the server. 
/// /// You should also monitor the session event loop. If it ends, this method will never return. @@ -185,4 +193,8 @@ impl Session { Ok(()) } + + pub fn decoding_options(&self) -> &DecodingOptions { + &self.decoding_options + } } diff --git a/lib/src/client/transport/channel.rs b/lib/src/client/transport/channel.rs index 7273cc9dc..54a2bf0ee 100644 --- a/lib/src/client/transport/channel.rs +++ b/lib/src/client/transport/channel.rs @@ -122,8 +122,8 @@ impl AsyncSecureChannel { pub async fn connect(&self) -> Result { self.request_send.store(None); + let mut backoff = self.session_retry_policy.new_backoff(); loop { - let mut backoff = self.session_retry_policy.new_backoff(); match self.connect_no_retry().await { Ok(event_loop) => { break Ok(event_loop); diff --git a/lib/src/client/transport/core.rs b/lib/src/client/transport/core.rs index e17f0814e..7ba5fc774 100644 --- a/lib/src/client/transport/core.rs +++ b/lib/src/client/transport/core.rs @@ -5,6 +5,7 @@ use std::time::Instant; use futures::future::Either; use parking_lot::RwLock; +use crate::core::comms::buffer::SendBuffer; use crate::core::comms::message_chunk::MessageIsFinalType; use crate::core::comms::{ chunker::Chunker, message_chunk::MessageChunk, message_chunk_info::ChunkInfo, @@ -13,8 +14,6 @@ use crate::core::comms::{ use crate::core::supported_message::SupportedMessage; use crate::types::StatusCode; -use super::buffer::SendBuffer; - #[derive(Debug)] struct MessageChunkWithChunkInfo { header: ChunkInfo, diff --git a/lib/src/client/transport/mod.rs b/lib/src/client/transport/mod.rs index 575a6f5df..d8ecd5288 100644 --- a/lib/src/client/transport/mod.rs +++ b/lib/src/client/transport/mod.rs @@ -1,4 +1,3 @@ -mod buffer; mod channel; mod core; mod state; diff --git a/lib/src/client/transport/tcp.rs b/lib/src/client/transport/tcp.rs index 5166dd8e0..6be43a32f 100644 --- a/lib/src/client/transport/tcp.rs +++ b/lib/src/client/transport/tcp.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use super::buffer::SendBuffer; 
use super::core::{OutgoingMessage, TransportPollResult, TransportState}; use crate::core::comms::{ + buffer::SendBuffer, secure_channel::SecureChannel, tcp_codec::{Message, TcpCodec}, tcp_types::HelloMessage, diff --git a/lib/src/client/transport/buffer.rs b/lib/src/core/comms/buffer.rs similarity index 87% rename from lib/src/client/transport/buffer.rs rename to lib/src/core/comms/buffer.rs index 5c501a242..a91a547b9 100644 --- a/lib/src/client/transport/buffer.rs +++ b/lib/src/core/comms/buffer.rs @@ -10,30 +10,39 @@ use crate::{ comms::{chunker::Chunker, message_chunk::MessageChunk, secure_channel::SecureChannel}, supported_message::SupportedMessage, }, - types::StatusCode, + types::{BinaryEncoder, StatusCode}, }; +use super::tcp_types::{AcknowledgeMessage, ErrorMessage}; + #[derive(Copy, Clone, Debug)] enum SendBufferState { Reading(usize), Writing, } +#[derive(Debug)] +enum PendingPayload { + Chunk(MessageChunk), + Ack(AcknowledgeMessage), + Error(ErrorMessage), +} + pub struct SendBuffer { /// The send buffer buffer: Cursor>, /// Queued chunks - chunks: VecDeque, + chunks: VecDeque, /// The last request id last_request_id: u32, /// Last sent sequence number last_sent_sequence_number: u32, /// Maximum size of a message, total. Use 0 for no limit - max_message_size: usize, + pub max_message_size: usize, /// Maximum number of chunks in a message. - max_chunk_count: usize, + pub max_chunk_count: usize, /// Maximum size of each individual chunk. 
- send_buffer_size: usize, + pub send_buffer_size: usize, state: SendBufferState, } @@ -67,12 +76,27 @@ impl SendBuffer { }; trace!("Sending chunk {:?}", next_chunk); - let size = secure_channel.apply_security(&next_chunk, self.buffer.get_mut())?; + let size = match next_chunk { + PendingPayload::Chunk(c) => secure_channel.apply_security(&c, self.buffer.get_mut())?, + PendingPayload::Ack(a) => a.encode(&mut self.buffer)?, + PendingPayload::Error(e) => e.encode(&mut self.buffer)?, + }; + self.buffer.set_position(0); self.state = SendBufferState::Reading(size); Ok(()) } + pub fn write_error(&mut self, error: ErrorMessage) { + // Clear any pending chunks, we're erroring out + self.chunks.clear(); + self.chunks.push_back(PendingPayload::Error(error)); + } + + pub fn write_ack(&mut self, ack: AcknowledgeMessage) { + self.chunks.push_back(PendingPayload::Ack(ack)); + } + pub fn write( &mut self, request_id: u32, @@ -81,11 +105,6 @@ impl SendBuffer { ) -> Result { trace!("Writing request to buffer"); - // We're not allowed to write when in reading state, we need to empty the buffer first - if matches!(self.state, SendBufferState::Reading(_)) { - return Err(StatusCode::BadInvalidState); - } - // Turn message to chunk(s) let chunks = Chunker::encode( self.last_sent_sequence_number + 1, @@ -108,7 +127,8 @@ impl SendBuffer { self.last_sent_sequence_number += chunks.len() as u32; // Send chunks - self.chunks.extend(chunks.into_iter()); + self.chunks + .extend(chunks.into_iter().map(PendingPayload::Chunk)); Ok(request_id) } } @@ -157,6 +177,24 @@ impl SendBuffer { pub fn can_read(&self) -> bool { matches!(self.state, SendBufferState::Reading(_)) || self.buffer.position() != 0 } + + pub fn revise( + &mut self, + send_buffer_size: usize, + max_message_size: usize, + max_chunk_count: usize, + ) { + if self.send_buffer_size > send_buffer_size { + self.buffer.get_mut().shrink_to(send_buffer_size + 1024); + self.send_buffer_size = send_buffer_size; + } + if self.max_message_size > 
max_message_size && max_message_size > 0 { + self.max_message_size = max_message_size; + } + if self.max_chunk_count > max_chunk_count && max_chunk_count > 0 { + self.max_chunk_count = max_chunk_count; + } + } } #[cfg(test)] @@ -170,7 +208,7 @@ mod tests { use crate::core::comms::secure_channel::{Role, SecureChannel}; use crate::crypto::CertificateStore; - use crate::server::prelude::StatusCode; + use crate::types::StatusCode; use crate::types::{ DateTime, DecodingOptions, NodeId, ReadRequest, ReadValueId, RequestHeader, TimestampsToReturn, diff --git a/lib/src/core/comms/chunker.rs b/lib/src/core/comms/chunker.rs index bd965334a..595a9488d 100644 --- a/lib/src/core/comms/chunker.rs +++ b/lib/src/core/comms/chunker.rs @@ -51,6 +51,10 @@ impl Chunker { let chunk_info = chunks[0].chunk_info(secure_channel)?; chunk_info.sequence_header.sequence_number }; + trace!( + "Received chunk with sequence number {}", + first_sequence_number + ); if first_sequence_number < starting_sequence_number { error!( "First sequence number of {} is less than last value {}", @@ -258,7 +262,7 @@ impl Chunker { } Err(err) => { debug!("Cannot decode message {:?}, err = {:?}", object_id, err); - Err(StatusCode::BadServiceUnsupported) + Err(err) } } } diff --git a/lib/src/core/comms/mod.rs b/lib/src/core/comms/mod.rs index 9f6df5ba3..1a2b0fd6a 100644 --- a/lib/src/core/comms/mod.rs +++ b/lib/src/core/comms/mod.rs @@ -5,6 +5,7 @@ //! Contains all code related to sending / receiving messages from a transport //! and turning those messages into and out of chunks. 
+pub mod buffer; pub mod chunker; pub mod message_chunk; pub mod message_chunk_info; @@ -14,13 +15,3 @@ pub mod security_header; pub mod tcp_codec; pub mod tcp_types; pub mod url; - -pub mod prelude { - pub use super::chunker::*; - pub use super::message_chunk::*; - pub use super::secure_channel::*; - pub use super::security_header::*; - pub use super::tcp_codec::*; - pub use super::tcp_types::*; - pub use super::url::*; -} diff --git a/lib/src/core/comms/secure_channel.rs b/lib/src/core/comms/secure_channel.rs index 87905ff77..47b9e9b77 100644 --- a/lib/src/core/comms/secure_channel.rs +++ b/lib/src/core/comms/secure_channel.rs @@ -6,10 +6,9 @@ use std::{ io::{Cursor, Write}, ops::Range, sync::Arc, + time::{Duration, Instant}, }; -use chrono::{Duration, TimeDelta}; - use crate::crypto::{ aeskey::AesKey, pkey::{KeySize, PrivateKey, PublicKey}, @@ -20,7 +19,7 @@ use crate::crypto::{ use crate::sync::*; use crate::types::{ service_types::ChannelSecurityToken, status_code::StatusCode, write_bytes, write_u8, - BinaryEncoder, ByteString, DateTime, DecodingOptions, MessageSecurityMode, + BinaryEncoder, ByteString, DecodingOptions, MessageSecurityMode, }; use super::{ @@ -47,7 +46,7 @@ pub struct SecureChannel { /// Secure channel id secure_channel_id: u32, /// Token creation time. 
- token_created_at: DateTime, + token_created_at: Instant, /// Token lifetime token_lifetime: u32, /// Token identifier @@ -80,7 +79,7 @@ impl SecureChannel { security_mode: MessageSecurityMode::None, secure_channel_id: 0, token_id: 0, - token_created_at: DateTime::now(), + token_created_at: Instant::now(), token_lifetime: 0, local_nonce: Vec::new(), remote_nonce: Vec::new(), @@ -113,7 +112,7 @@ impl SecureChannel { security_policy: SecurityPolicy::None, secure_channel_id: 0, token_id: 0, - token_created_at: DateTime::now(), + token_created_at: Instant::now(), token_lifetime: 0, local_nonce: Vec::new(), remote_nonce: Vec::new(), @@ -169,14 +168,14 @@ impl SecureChannel { pub fn clear_security_token(&mut self) { self.secure_channel_id = 0; self.token_id = 0; - self.token_created_at = DateTime::now(); + self.token_created_at = Instant::now(); self.token_lifetime = 0; } pub fn set_security_token(&mut self, channel_token: ChannelSecurityToken) { self.secure_channel_id = channel_token.channel_id; self.token_id = channel_token.token_id; - self.token_created_at = DateTime::now(); + self.token_created_at = Instant::now(); self.token_lifetime = channel_token.revised_lifetime; } @@ -188,7 +187,7 @@ impl SecureChannel { self.secure_channel_id } - pub fn token_created_at(&self) -> DateTime { + pub fn token_created_at(&self) -> Instant { self.token_created_at } @@ -204,7 +203,7 @@ impl SecureChannel { self.token_id } - pub fn set_client_offset(&mut self, client_offset: Duration) { + pub fn set_client_offset(&mut self, client_offset: chrono::Duration) { self.decoding_options.client_offset = client_offset; } @@ -227,9 +226,9 @@ impl SecureChannel { } else { // Check if secure channel 75% close to expiration in which case send a renew let renew_lifetime = (self.token_lifetime() * 3) / 4; - let renew_lifetime = TimeDelta::try_milliseconds(renew_lifetime as i64).unwrap(); + let renew_lifetime = Duration::from_millis(renew_lifetime as u64); // Renew the token? 
- DateTime::now() - self.token_created_at() > renew_lifetime + Instant::now() - self.token_created_at() > renew_lifetime } } @@ -373,9 +372,12 @@ impl SecureChannel { /// Test if the token has expired yet pub fn token_has_expired(&self) -> bool { let token_created_at = self.token_created_at; - let token_expires = - token_created_at + TimeDelta::try_seconds(self.token_lifetime as i64).unwrap(); - DateTime::now().ge(&token_expires) + let token_expires = token_created_at + Duration::from_secs(self.token_lifetime as u64); + Instant::now() > token_expires + } + + pub fn token_renewal_deadline(&self) -> Instant { + self.token_created_at + Duration::from_secs((self.token_lifetime as u64) * 3 / 4) } /// Calculates the signature size for a message depending on the supplied security header @@ -1278,4 +1280,8 @@ impl SecureChannel { } } } + + pub fn set_token_lifetime(&mut self, token_lifetime: u32) { + self.token_lifetime = token_lifetime; + } } diff --git a/lib/src/core/comms/tcp_types.rs b/lib/src/core/comms/tcp_types.rs index cc44a630b..122d77ee4 100644 --- a/lib/src/core/comms/tcp_types.rs +++ b/lib/src/core/comms/tcp_types.rs @@ -173,7 +173,7 @@ impl MessageHeader { /// Implementation of the HEL message in OPC UA #[derive(Debug, Clone, PartialEq)] pub struct HelloMessage { - pub message_header: MessageHeader, + message_header: MessageHeader, pub protocol_version: u32, pub receive_buffer_size: u32, pub send_buffer_size: u32, @@ -282,7 +282,7 @@ impl HelloMessage { /// Implementation of the ACK message in OPC UA #[derive(Debug, Clone, PartialEq)] pub struct AcknowledgeMessage { - pub message_header: MessageHeader, + message_header: MessageHeader, pub protocol_version: u32, pub receive_buffer_size: u32, pub send_buffer_size: u32, @@ -324,10 +324,31 @@ impl BinaryEncoder for AcknowledgeMessage { } } +impl AcknowledgeMessage { + pub fn new( + protocol_version: u32, + receive_buffer_size: u32, + send_buffer_size: u32, + max_message_size: u32, + max_chunk_count: u32, + ) -> 
Self { + let mut ack = AcknowledgeMessage { + message_header: MessageHeader::new(MessageType::Acknowledge), + protocol_version, + receive_buffer_size, + send_buffer_size, + max_message_size, + max_chunk_count, + }; + ack.message_header.message_size = ack.byte_len() as u32; + ack + } +} + /// Implementation of the ERR message in OPC UA #[derive(Debug, Clone, PartialEq)] pub struct ErrorMessage { - pub message_header: MessageHeader, + message_header: MessageHeader, pub error: u32, pub reason: UAString, } @@ -359,12 +380,155 @@ impl BinaryEncoder for ErrorMessage { impl ErrorMessage { pub fn from_status_code(status_code: StatusCode) -> ErrorMessage { + Self::new(status_code, &status_code.sub_code().description()) + } + + pub fn new(status_code: StatusCode, reason: &str) -> ErrorMessage { let mut error = ErrorMessage { message_header: MessageHeader::new(MessageType::Error), error: status_code.bits(), - reason: UAString::from(status_code.sub_code().description()), + reason: UAString::from(reason), }; error.message_header.message_size = error.byte_len() as u32; error } } + +#[cfg(test)] +mod tests { + use std::io::Cursor; + + use crate::{ + core::comms::tcp_types::{ + AcknowledgeMessage, BinaryEncoder, HelloMessage, MessageHeader, MessageType, + }, + types::{ + ApplicationDescription, ByteString, DecodingOptions, EndpointDescription, + MessageSecurityMode, UAString, + }, + }; + + fn hello_data() -> Vec { + vec![ + 0x48, 0x45, 0x4c, 0x46, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x0a, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x19, 0x00, 0x00, 0x00, 0x6f, 0x70, 0x63, 0x2e, 0x74, 0x63, 0x70, 0x3a, 0x2f, 0x2f, + 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x31, 0x32, 0x33, 0x34, + 0x2f, + ] + } + + fn ack_data() -> Vec { + vec![ + 0x41, 0x43, 0x4b, 0x46, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x00, + 
] + } + + #[test] + pub fn hello() { + let mut stream = Cursor::new(hello_data()); + let decoding_options = DecodingOptions::test(); + let hello = HelloMessage::decode(&mut stream, &decoding_options).unwrap(); + println!("hello = {:?}", hello); + assert_eq!(hello.message_header.message_type, MessageType::Hello); + assert_eq!(hello.message_header.message_size, 57); + assert_eq!(hello.protocol_version, 0); + assert_eq!(hello.receive_buffer_size, 655360); + assert_eq!(hello.send_buffer_size, 655360); + assert_eq!(hello.max_message_size, 0); + assert_eq!(hello.max_chunk_count, 0); + assert_eq!( + hello.endpoint_url, + UAString::from("opc.tcp://127.0.0.1:1234/") + ); + } + + #[test] + pub fn acknowledge() { + let mut stream = Cursor::new(ack_data()); + let decoding_options = DecodingOptions::test(); + let ack = AcknowledgeMessage::decode(&mut stream, &decoding_options).unwrap(); + println!("ack = {:?}", ack); + assert_eq!(ack.message_header.message_type, MessageType::Acknowledge); + assert_eq!(ack.message_header.message_size, 28); + assert_eq!(ack.protocol_version, 0); + assert_eq!(ack.receive_buffer_size, 524288); + assert_eq!(ack.send_buffer_size, 524288); + assert_eq!(ack.max_message_size, 16777216); + assert_eq!(ack.max_chunk_count, 65535); + } + + #[test] + fn endpoint_url() { + // Ensure hello with None endpoint is invalid + // Ensure hello with URL > 4096 chars is invalid + let mut h = HelloMessage { + message_header: MessageHeader { + message_type: MessageType::Invalid, + message_size: 0, + }, + protocol_version: 0, + receive_buffer_size: 0, + send_buffer_size: 0, + max_message_size: 0, + max_chunk_count: 0, + endpoint_url: UAString::null(), + }; + + let endpoints = vec![EndpointDescription { + endpoint_url: UAString::from("opc.tcp://foo"), + security_policy_uri: UAString::null(), + security_mode: MessageSecurityMode::None, + server: ApplicationDescription::default(), + security_level: 0, + server_certificate: ByteString::null(), + transport_profile_uri: 
UAString::null(), + user_identity_tokens: None, + }]; + + // Negative tests + assert!(!h.matches_endpoint(&endpoints)); + h.endpoint_url = UAString::from(""); + assert!(!h.matches_endpoint(&endpoints)); + h.endpoint_url = UAString::from("opc.tcp://foo/blah"); + assert!(!h.matches_endpoint(&endpoints)); + // 4097 bytes + h.endpoint_url = UAString::from((0..4097).map(|_| 'A').collect::()); + assert!(!h.is_endpoint_valid_length()); + + // Positive tests + h.endpoint_url = UAString::from("opc.tcp://foo/"); + assert!(h.matches_endpoint(&endpoints)); + h.endpoint_url = UAString::from("opc.tcp://bar/"); // Ignore hostname + assert!(h.matches_endpoint(&endpoints)); + h.endpoint_url = UAString::from((0..4096).map(|_| 'A').collect::()); + assert!(h.is_endpoint_valid_length()) + } + + #[test] + fn valid_buffer_sizes() { + // Test that invalid buffer sizes are rejected, while valid buffer sizes are accepted + let mut h = HelloMessage { + message_header: MessageHeader { + message_type: MessageType::Invalid, + message_size: 0, + }, + protocol_version: 0, + receive_buffer_size: 0, + send_buffer_size: 0, + max_message_size: 0, + max_chunk_count: 0, + endpoint_url: UAString::null(), + }; + assert!(!h.is_valid_buffer_sizes()); + h.receive_buffer_size = 8195; + assert!(!h.is_valid_buffer_sizes()); + h.send_buffer_size = 8195; + assert!(!h.is_valid_buffer_sizes()); + h.receive_buffer_size = 8196; + assert!(!h.is_valid_buffer_sizes()); + h.send_buffer_size = 8196; + assert!(h.is_valid_buffer_sizes()); + } +} diff --git a/lib/src/core/handle.rs b/lib/src/core/handle.rs index bb892808d..e35c3c4b4 100644 --- a/lib/src/core/handle.rs +++ b/lib/src/core/handle.rs @@ -41,6 +41,7 @@ impl Handle { } /// Variant of the handle factory using atomics +#[derive(Debug)] pub struct AtomicHandle { next: AtomicU32, first: u32, diff --git a/lib/src/core/mod.rs b/lib/src/core/mod.rs index ca93982c9..89e5f8a32 100644 --- a/lib/src/core/mod.rs +++ b/lib/src/core/mod.rs @@ -107,8 +107,4 @@ pub mod runtime; 
#[rustfmt::skip] pub mod supported_message; -/// Contains most of the things that are typically required from a client / server. -pub mod prelude { - pub use super::{comms::prelude::*, config::Config, supported_message::*}; - pub use crate::types::{status_code::StatusCode, *}; -} +pub use supported_message::SupportedMessage; diff --git a/lib/src/core/tests/comms.rs b/lib/src/core/tests/comms.rs index 81ba9fffe..844172d66 100644 --- a/lib/src/core/tests/comms.rs +++ b/lib/src/core/tests/comms.rs @@ -1,59 +1,7 @@ -use std::io::*; - use crate::crypto::SecurityPolicy; use crate::types::*; -use crate::core::comms::{secure_channel::*, tcp_types::*}; - -fn hello_data() -> Vec { - vec![ - 0x48, 0x45, 0x4c, 0x46, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x00, - 0x00, 0x00, 0x6f, 0x70, 0x63, 0x2e, 0x74, 0x63, 0x70, 0x3a, 0x2f, 0x2f, 0x31, 0x32, 0x37, - 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x31, 0x32, 0x33, 0x34, 0x2f, - ] -} - -fn ack_data() -> Vec { - vec![ - 0x41, 0x43, 0x4b, 0x46, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x00, - ] -} - -#[test] -pub fn hello() { - let mut stream = Cursor::new(hello_data()); - let decoding_options = DecodingOptions::test(); - let hello = HelloMessage::decode(&mut stream, &decoding_options).unwrap(); - println!("hello = {:?}", hello); - assert_eq!(hello.message_header.message_type, MessageType::Hello); - assert_eq!(hello.message_header.message_size, 57); - assert_eq!(hello.protocol_version, 0); - assert_eq!(hello.receive_buffer_size, 655360); - assert_eq!(hello.send_buffer_size, 655360); - assert_eq!(hello.max_message_size, 0); - assert_eq!(hello.max_chunk_count, 0); - assert_eq!( - hello.endpoint_url, - UAString::from("opc.tcp://127.0.0.1:1234/") - ); -} - -#[test] -pub fn acknowledge() { - let mut stream = 
Cursor::new(ack_data()); - let decoding_options = DecodingOptions::test(); - let ack = AcknowledgeMessage::decode(&mut stream, &decoding_options).unwrap(); - println!("ack = {:?}", ack); - assert_eq!(ack.message_header.message_type, MessageType::Acknowledge); - assert_eq!(ack.message_header.message_size, 28); - assert_eq!(ack.protocol_version, 0); - assert_eq!(ack.receive_buffer_size, 524288); - assert_eq!(ack.send_buffer_size, 524288); - assert_eq!(ack.max_message_size, 16777216); - assert_eq!(ack.max_chunk_count, 65535); -} +use crate::core::comms::secure_channel::*; #[test] pub fn secure_channel_nonce_basic128rsa15() { diff --git a/lib/src/core/tests/hello.rs b/lib/src/core/tests/hello.rs deleted file mode 100644 index 32e7af1c0..000000000 --- a/lib/src/core/tests/hello.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::types::{ - byte_string::ByteString, - service_types::{ApplicationDescription, EndpointDescription, MessageSecurityMode}, - string::UAString, -}; - -use crate::core::comms::tcp_types::{HelloMessage, MessageHeader, MessageType}; - -#[test] -fn endpoint_url() { - // Ensure hello with None endpoint is invalid - // Ensure hello with URL > 4096 chars is invalid - let mut h = HelloMessage { - message_header: MessageHeader { - message_type: MessageType::Invalid, - message_size: 0, - }, - protocol_version: 0, - receive_buffer_size: 0, - send_buffer_size: 0, - max_message_size: 0, - max_chunk_count: 0, - endpoint_url: UAString::null(), - }; - - let endpoints = vec![EndpointDescription { - endpoint_url: UAString::from("opc.tcp://foo"), - security_policy_uri: UAString::null(), - security_mode: MessageSecurityMode::None, - server: ApplicationDescription::default(), - security_level: 0, - server_certificate: ByteString::null(), - transport_profile_uri: UAString::null(), - user_identity_tokens: None, - }]; - - // Negative tests - assert!(!h.matches_endpoint(&endpoints)); - h.endpoint_url = UAString::from(""); - assert!(!h.matches_endpoint(&endpoints)); - 
h.endpoint_url = UAString::from("opc.tcp://foo/blah"); - assert!(!h.matches_endpoint(&endpoints)); - // 4097 bytes - h.endpoint_url = UAString::from((0..4097).map(|_| 'A').collect::()); - assert!(!h.is_endpoint_valid_length()); - - // Positive tests - h.endpoint_url = UAString::from("opc.tcp://foo/"); - assert!(h.matches_endpoint(&endpoints)); - h.endpoint_url = UAString::from("opc.tcp://bar/"); // Ignore hostname - assert!(h.matches_endpoint(&endpoints)); - h.endpoint_url = UAString::from((0..4096).map(|_| 'A').collect::()); - assert!(h.is_endpoint_valid_length()) -} - -#[test] -fn valid_buffer_sizes() { - // Test that invalid buffer sizes are rejected, while valid buffer sizes are accepted - let mut h = HelloMessage { - message_header: MessageHeader { - message_type: MessageType::Invalid, - message_size: 0, - }, - protocol_version: 0, - receive_buffer_size: 0, - send_buffer_size: 0, - max_message_size: 0, - max_chunk_count: 0, - endpoint_url: UAString::null(), - }; - assert!(!h.is_valid_buffer_sizes()); - h.receive_buffer_size = 8195; - assert!(!h.is_valid_buffer_sizes()); - h.send_buffer_size = 8195; - assert!(!h.is_valid_buffer_sizes()); - h.receive_buffer_size = 8196; - assert!(!h.is_valid_buffer_sizes()); - h.send_buffer_size = 8196; - assert!(h.is_valid_buffer_sizes()); -} diff --git a/lib/src/core/tests/mod.rs b/lib/src/core/tests/mod.rs index 8a2d6080c..19ecd53ff 100644 --- a/lib/src/core/tests/mod.rs +++ b/lib/src/core/tests/mod.rs @@ -171,7 +171,6 @@ impl Test { mod chunk; mod comms; -mod hello; mod secure_channel; mod services; mod supported_message; diff --git a/lib/src/lib.rs b/lib/src/lib.rs index bf689fa06..c1a47a2ba 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -8,8 +8,6 @@ extern crate tempdir; extern crate bitflags; #[macro_use] extern crate serde_derive; -#[cfg(feature = "http")] -extern crate actix_web; #[cfg(test)] extern crate serde_json; #[macro_use] @@ -115,8 +113,3 @@ fn from_hex(v: &str) -> Vec { _ => panic!("Invalid hex length"), } 
} - -mod prelude { - #[cfg(feature = "server")] - pub use crate::server::prelude::*; -} diff --git a/lib/src/server/address_space/address_space.rs b/lib/src/server/address_space/address_space.rs index b2beff266..48ee7f9ab 100644 --- a/lib/src/server/address_space/address_space.rs +++ b/lib/src/server/address_space/address_space.rs @@ -1,785 +1,459 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Implementation of `AddressSpace`. -use std::collections::HashMap; -use std::sync::Arc; - -use chrono::Utc; - -use crate::sync::*; -use crate::types::{ - node_ids::VariableId::*, - service_types::{BrowseDirection, CallMethodRequest, CallMethodResult, NodeClass}, - status_code::StatusCode, - *, -}; +use std::collections::VecDeque; + +use hashbrown::{Equivalent, HashMap, HashSet}; -use crate::server::{ - address_space::{ - node::{HasNodeId, NodeType}, - object::{Object, ObjectBuilder}, - references::{Reference, ReferenceDirection, References}, - variable::Variable, - AttrFnGetter, +use crate::{ + server::node_manager::{ParsedReadValueId, ParsedWriteValue, RequestContext, TypeTree}, + types::{ + BrowseDirection, DataValue, LocalizedText, NodeClass, NodeId, QualifiedName, + ReferenceTypeId, StatusCode, TimestampsToReturn, }, - callbacks, constants, - diagnostics::ServerDiagnostics, - historical::HistoryServerCapabilities, - session::SessionManager, - state::ServerState, }; -/// Finds a node in the address space and coerces it into a reference of the expected node type. -macro_rules! 
find_node { - ($a: expr, $id: expr, $node_type: ident) => { - $a.find_node($id).and_then(|node| match node { - NodeType::$node_type(ref node) => Some(node.as_ref()), - _ => None, - }) - }; +use super::{ + read_node_value, validate_node_read, validate_node_write, HasNodeId, NodeType, ObjectBuilder, + Variable, +}; + +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +pub struct Reference { + pub reference_type: NodeId, + pub target_node: NodeId, } -/// Finds a node in the address space and coerces it into a mutable reference of the expected node type. -macro_rules! find_node_mut { - ($a: expr, $id: expr, $node_type: ident) => { - $a.find_node_mut($id).and_then(|node| match node { - NodeType::$node_type(ref mut node) => Some(node.as_mut()), - _ => None, - }) - }; +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum ReferenceDirection { + Forward, + Inverse, } -/// Searches for the specified node by type, expecting it to exist -macro_rules! expect_and_find_node { - ($a: expr, $id: expr, $node_type: ident) => { - find_node!($a, $id, $node_type) - .or_else(|| { - panic!("There should be a node of id {:?}!", $id); - }) - .unwrap() - }; +// Note, must have same hash and eq implementation as Reference. +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +struct ReferenceKey<'a> { + pub reference_type: &'a NodeId, + pub target_node: &'a NodeId, } -/// Searches for the specified object node, expecting it to exist -macro_rules! expect_and_find_object { - ($a: expr, $id: expr) => { - expect_and_find_node!($a, $id, Object) - }; +impl<'a> Equivalent for ReferenceKey<'a> { + fn equivalent(&self, key: &Reference) -> bool { + &key.reference_type == self.reference_type && &key.target_node == self.target_node + } } -/// Tests if the node of the expected type exists -macro_rules! 
is_node { - ($a: expr, $id: expr, $node_type: ident) => { - if let Some(node) = $a.find_node($id) { - if let NodeType::$node_type(_) = node { - true - } else { - false - } - } else { - false +impl<'a> From<&'a Reference> for ReferenceKey<'a> { + fn from(value: &'a Reference) -> Self { + Self { + reference_type: &value.reference_type, + target_node: &value.target_node, } - }; + } } -/// Tests if the object node exists -macro_rules! is_object { - ($a: expr, $id: expr) => { - is_node!($a, $id, Object) - }; +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +pub struct ReferenceRef<'a> { + pub reference_type: &'a NodeId, + pub target_node: &'a NodeId, + pub direction: ReferenceDirection, } -/// Tests if the method node exists -macro_rules! is_method { - ($a: expr, $id: expr) => { - is_node!($a, $id, Method) - }; -} +// Note that there is a potentially significant benefit to using hashbrown directly here, +// (which is what the std HashMap is built on!), since it lets us remove references from +// the hash sets without cloning given node IDs. -/// Gets a field from the live diagnostics table. -macro_rules! server_diagnostics_summary { - ($address_space: expr, $variable_id: expr, $field: ident) => { - let server_diagnostics = $address_space.server_diagnostics.as_ref().unwrap().clone(); - $address_space.set_variable_getter( - $variable_id, - move |_, timestamps_to_return, _, _, _, _| { - let server_diagnostics = server_diagnostics.read(); - let server_diagnostics_summary = server_diagnostics.server_diagnostics_summary(); - - debug!( - "Request to get server diagnostics field {}, value = {}", - stringify!($variable_id), - server_diagnostics_summary.$field - ); - - let mut value = DataValue::from(Variant::from(server_diagnostics_summary.$field)); - let now = DateTime::now(); - value.set_timestamps(timestamps_to_return, now, now); - Ok(Some(value)) - }, - ); - }; +pub struct References { + /// References by source node ID. 
+ by_source: HashMap>, + /// References by target node ID. + by_target: HashMap>, } -pub(crate) type MethodCallback = Box; +impl References { + pub fn new() -> Self { + Self { + by_source: HashMap::new(), + by_target: HashMap::new(), + } + } -const OPCUA_INTERNAL_NAMESPACE_IDX: u16 = 1; + pub fn insert<'a, T, S>( + &mut self, + source: &NodeId, + references: &'a [(&'a NodeId, &S, ReferenceDirection)], + ) where + T: Into, + S: Into + Clone, + { + for (target, typ, direction) in references { + let typ: NodeId = (*typ).clone().into(); + match direction { + ReferenceDirection::Forward => self.insert_reference(source, target, typ), + ReferenceDirection::Inverse => self.insert_reference(target, source, typ), + } + } + } -#[derive(PartialEq, Eq, Clone, Debug, Hash)] -struct MethodKey { - object_id: NodeId, - method_id: NodeId, -} + pub fn insert_reference( + &mut self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: impl Into, + ) { + if source_node == target_node { + panic!( + "Node id from == node id to {}, self reference is not allowed", + source_node + ); + } -/// The `AddressSpace` describes all of the nodes managed by the server and the references between -/// them. Usually it will be populated with the default OPC UA node set plus any that have been -/// added by the server. -/// -/// The `AddressSpace` enforces minimal modelling rules - the implementation is expected to abide -/// by rules when adding nodes. To aid with adding nodes to the address space, each node is -/// a [`NodeType`] which can be one of [`DataType`], [`Object`], [`ObjectType`], [`ReferenceType`], [`Method`], -/// [`Variable`], [`VariableType`] or [`View`]. Each node type has various mandatory and optional -/// attributes that can be set with function calls. In addition, each node type has a corresponding -/// builder, e.g. [`VariableBuilder`] that can be used to simplify adding nodes. 
-/// -/// Some of the methods in `AddressSpace` are liable to change over time especially as more of the -/// heavy lifting is done via builders. -/// -/// [`NodeType`]: ../node/enum.NodeType.html -/// [`DataType`]: ../data_type/struct.DataType.html -/// [`Object`]: ../object/struct.Object.html -/// [`ObjectType`]: ../object_type/struct.ObjectType.html -/// [`ReferenceType`]: ../reference_type/struct.ReferenceType.html -/// [`Method`]: ../method/struct.Method.html -/// [`Variable`]: ../variable/struct.Variable.html -/// [`VariableType`]: ../variable_type/struct.VariableType.html -/// [`View`]: ../view/struct.View.html -/// [`VariableBuilder`]: ../variable/struct.VariableBuilder.html -/// -pub struct AddressSpace { - /// A map of all the nodes that are part of the address space - node_map: HashMap, - /// The references between nodes - references: References, - /// This is the last time that nodes or references to nodes were added or removed from the address space. - last_modified: DateTimeUtc, - /// Access to server diagnostics - server_diagnostics: Option>>, - /// The namespace to create sequential node ids - default_namespace: u16, - /// The namespace to generate sequential audit node ids - audit_namespace: u16, - /// The namespace to generate sequential internal node ids - internal_namespace: u16, - /// The list of all registered namespaces. - namespaces: Vec, -} + let forward_refs = match self.by_source.get_mut(source_node) { + Some(r) => r, + None => self.by_source.entry(source_node.clone()).or_default(), + }; -impl Default for AddressSpace { - fn default() -> Self { - AddressSpace { - node_map: HashMap::new(), - references: References::default(), - last_modified: Utc::now(), - server_diagnostics: None, - default_namespace: OPCUA_INTERNAL_NAMESPACE_IDX, - audit_namespace: OPCUA_INTERNAL_NAMESPACE_IDX, - internal_namespace: OPCUA_INTERNAL_NAMESPACE_IDX, - // By default, there will be two standard namespaces. 
The first is the default - // OPC UA namespace for its standard nodes. The second is the internal namespace used - // by this implementation. - namespaces: vec!["http://opcfoundation.org/UA/".to_string()], + let reference_type = reference_type.into(); + + if !forward_refs.insert(Reference { + reference_type: reference_type.clone(), + target_node: target_node.clone(), + }) { + // If the reference is already added, no reason to try adding it to the inverse. + return; } + + let inverse_refs = match self.by_target.get_mut(target_node) { + Some(r) => r, + None => self.by_target.entry(target_node.clone()).or_default(), + }; + + inverse_refs.insert(Reference { + reference_type: reference_type, + target_node: source_node.clone(), + }); } -} -impl AddressSpace { - /// Constructs a default address space consisting of all the nodes and references in the OPC - /// UA default nodeset. - pub fn new() -> AddressSpace { - // Construct the Root folder and the top level nodes - let mut address_space = AddressSpace::default(); - address_space.add_default_nodes(); - address_space + pub fn insert_references<'a>( + &mut self, + references: impl Iterator)>, + ) { + for (source, target, typ) in references { + self.insert_reference(source, target, typ); + } } - /// Returns the last modified date for the address space - pub fn last_modified(&self) -> DateTimeUtc { - self.last_modified + pub fn delete_reference( + &mut self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: impl Into, + ) -> bool { + let mut found = false; + let reference_type = reference_type.into(); + let rf = ReferenceKey { + reference_type: &reference_type, + target_node: target_node, + }; + found |= self + .by_source + .get_mut(source_node) + .map(|f| f.remove(&rf)) + .unwrap_or_default(); + + let rf = ReferenceKey { + reference_type: &reference_type, + target_node: &source_node, + }; + + found |= self + .by_target + .get_mut(target_node) + .map(|f| f.remove(&rf)) + .unwrap_or_default(); + + found } - 
/// Registers a namespace described by a uri with address space. The return code is the index - /// of the newly added namespace / index. The index is used with `NodeId`. Registering a - /// namespace that is already registered will return the index to the previous instance. - /// The last registered namespace becomes the default namespace unless you explcitly call - /// `set_default_namespace()` after this. - pub fn register_namespace(&mut self, namespace: &str) -> Result { - let now = DateTime::now(); - if namespace.is_empty() || self.namespaces.len() == u16::MAX as usize { - Err(()) + pub fn delete_node_references(&mut self, source_node: &NodeId) -> bool { + let mut found = false; + // Remove any forward references and their inverse. + found |= if let Some(refs) = self.by_source.remove(source_node) { + for referenced in refs { + self.by_target.get_mut(&referenced.target_node).map(|n| { + n.remove(&ReferenceKey { + reference_type: &referenced.reference_type, + target_node: source_node, + }) + }); + } + true } else { - // Check if namespace already exists or not - if let Some(i) = self.namespace_index(namespace) { - // Existing namespace index - Ok(i) - } else { - // Add and register new namespace - self.namespaces.push(namespace.into()); - self.set_namespaces(&now); - // New namespace index - let ns = (self.namespaces.len() - 1) as u16; - // Make this the new default namespace - self.default_namespace = ns; - Ok(ns) + false + }; + + // Remove any inverse references and their original. 
+ found |= if let Some(refs) = self.by_target.remove(source_node) { + for referenced in refs { + self.by_source.get_mut(&referenced.target_node).map(|n| { + n.remove(&ReferenceKey { + reference_type: &referenced.reference_type, + target_node: source_node, + }) + }); } - } - } + true + } else { + false + }; - /// Finds the namespace index of a given namespace - pub fn namespace_index(&self, namespace: &str) -> Option { - self.namespaces - .iter() - .position(|ns| { - let ns: &str = ns.as_ref(); - ns == namespace - }) - .map(|i| i as u16) + found } - fn set_servers(&mut self, server_state: Arc>, now: &DateTime) { - let server_state = trace_read_lock!(server_state); - if let Some(ref mut v) = self.find_variable_mut(Server_ServerArray) { - let _ = v.set_value_direct( - Variant::from(&server_state.servers), - StatusCode::Good, - now, - now, - ); - } + pub fn has_reference( + &self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: impl Into, + ) -> bool { + let reference_type = reference_type.into(); + self.by_source + .get(source_node) + .map(|n| { + n.contains(&ReferenceKey { + reference_type: &reference_type, + target_node, + }) + }) + .unwrap_or_default() } - fn set_namespaces(&mut self, now: &DateTime) { - let value = Variant::from(&self.namespaces); - if let Some(ref mut v) = self.find_variable_mut(Server_NamespaceArray) { - let _ = v.set_value_direct(value, StatusCode::Good, now, now); - } + pub fn find_references<'a: 'b, 'b>( + &'a self, + source_node: &'b NodeId, + filter: Option<(impl Into, bool)>, + type_tree: &'b TypeTree, + direction: BrowseDirection, + ) -> impl Iterator> + 'b { + ReferenceIterator::new( + source_node, + direction, + self, + filter.map(|f| (f.0.into(), f.1)), + type_tree, + ) } +} - /// Sets the service level 0-255 worst to best quality of service - pub fn set_service_level(&mut self, service_level: u8, now: &DateTime) { - self.set_variable_value(Server_ServiceLevel, service_level, now, now); - } +// Handy feature to let us 
easily return a concrete type from `find_references`. +struct ReferenceIterator<'a, 'b> { + filter: Option<(NodeId, bool)>, + type_tree: &'b TypeTree, + iter_s: Option>, + iter_t: Option>, +} - /// Sets values for nodes representing the server. - pub fn set_server_state(&mut self, server_state: Arc>) { - // Server state requires the generated address space, otherwise nothing - #[cfg(feature = "generated-address-space")] - { - let now = DateTime::now(); - - // Servers - self.set_servers(server_state.clone(), &now); - - // Register the server's application uri as a namespace - { - let server_state = trace_read_lock!(server_state); - let server_config = trace_read_lock!(server_state.config); - let _ = self.register_namespace(&server_config.application_uri); +impl<'a, 'b> Iterator for ReferenceIterator<'a, 'b> { + type Item = ReferenceRef<'a>; + + fn next(&mut self) -> Option { + loop { + let Some(inner) = self.next_inner() else { + return None; + }; + + if let Some(filter) = &self.filter { + if !filter.1 && inner.reference_type != &filter.0 + || filter.1 + && !self + .type_tree + .is_subtype_of(&inner.reference_type, &filter.0) + { + continue; + } } - // ServerCapabilities - { - let server_state = trace_read_lock!(server_state); - let server_config = trace_read_lock!(server_state.config); - self.set_variable_value( - Server_ServerCapabilities_MaxArrayLength, - server_config.limits.max_array_length as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_MaxStringLength, - server_config.limits.max_string_length as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_MaxByteStringLength, - server_config.limits.max_byte_string_length as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_MaxBrowseContinuationPoints, - constants::MAX_BROWSE_CONTINUATION_POINTS as u16, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_MaxHistoryContinuationPoints, - 
constants::MAX_HISTORY_CONTINUATION_POINTS as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_MaxQueryContinuationPoints, - constants::MAX_QUERY_CONTINUATION_POINTS as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_MinSupportedSampleRate, - constants::MIN_SAMPLING_INTERVAL, - &now, - &now, - ); - let locale_ids: Vec = server_config - .locale_ids - .iter() - .map(|v| UAString::from(v).into()) - .collect(); - self.set_variable_value( - Server_ServerCapabilities_LocaleIdArray, - (VariantTypeId::String, locale_ids), - &now, - &now, - ); - - let ol = &server_state.operational_limits; - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerRead, - ol.max_nodes_per_read as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerWrite, - ol.max_nodes_per_write as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerMethodCall, - ol.max_nodes_per_method_call as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerBrowse, - ol.max_nodes_per_browse as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerRegisterNodes, - ol.max_nodes_per_register_nodes as u32, - &now, - &now, - ); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerTranslateBrowsePathsToNodeIds, ol.max_nodes_per_translate_browse_paths_to_node_ids as u32, &now, &now); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerNodeManagement, - ol.max_nodes_per_node_management as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxMonitoredItemsPerCall, - ol.max_monitored_items_per_call as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadData, 
- ol.max_nodes_per_history_read_data as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadEvents, - ol.max_nodes_per_history_read_events as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateData, - ol.max_nodes_per_history_update_data as u32, - &now, - &now, - ); - self.set_variable_value( - Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateEvents, - ol.max_nodes_per_history_update_events as u32, - &now, - &now, - ); - } + break Some(inner); + } + } - // Server_ServerCapabilities_ServerProfileArray - if let Some(ref mut v) = - self.find_variable_mut(Server_ServerCapabilities_ServerProfileArray) - { - // Declares what the server implements. Subitems are implied by the profile. A subitem - // marked - is optional to the spec - let server_profiles = [ - // Base server behaviour - // SecurityPolicy - None - // User Token - User Name Password Server Facet - // Address Space Base - // AttributeRead - // -Attribute Write Index - // -Attribute Write Values - // Base Info Core Structure - // -Base Info OptionSet - // -Base Info Placeholder Modelling Rules - // -Base Info ValueAsText - // Discovery Find Servers Self - // Discovery Get Endpoints - // -Security - No Application Authentications - // -Security - Security Administration - // Session Base - // Session General Service Behaviour - // Session Minimum 1 - // View Basic - // View Minimum Continuation Point 01 - // View RegisterNodes - // View TranslateBrowsePath - "http://opcfoundation.org/UA-Profile/Server/Behaviour", - // Embedded UA server - // SecurityPolicy - Basic128Rsa15 - // Security - // - Security Certificate Validation - // - Security Basic 128Rsa15 - // - Security Encryption Required - // - Security Signing Required - // Standard DataChange Subscription Server Facet - // Base Information - // - Base Info GetMonitoredItems Method - // Monitored Item 
Services - // - Monitored Items Deadband Filter - // - Monitor Items 10 - // - Monitor Items 100 - // - Monitor MinQueueSize_02 - // - Monitor Triggering - // Subscription Services - // - Subscription Minimum 02 - // - Subscription Publish Min 05 - // Method Services - // - Method call - // User Token - X509 Certificate Server Facet - // - Security User X509 - Server supports public / private key pair for user identity - // Micro Embedded Device Server Profile - // Base Information - // - Base Info Type System - Exposes a Type system with DataTypes, ReferenceTypes, ObjectTypes and VariableTypes - // including all of OPC UA namespace (namespace 0) types that are used by the Server as defined in Part 6. - // - Base Info Placeholder Modelling Rules - The server supports defining cusom Object or Variables that include the use of OptionalPlaceholder - // or MandatoryPlaceholder modelling rules - // - Base Info Engineering Units - The server supports defining Variables that include the Engineering Units property - // Security - // Security Default ApplicationInstanceCertificate - has a default ApplicationInstanceCertificate that is valid - "http://opcfoundation.org/UA-Profile/Server/EmbeddedUA", - // TODO server profile - // Standard UA Server Profile - // Enhanced DataChange Subscription Server Facet - // Monitored Item Services - // - Monitor Items 500 - Support at least 500 MonitoredItems per Subscription - // - Monitor MinQueueSize_05 - Support at least 5 queue entries - // Subscription Services - // - Subscription Minimum 05 - Support at least 5 subscriptions per Session - // - Subscription Publish Min 10 - Support at least Publish service requests per session - // Embedded UA Server Profile - // Base Information - // - Base Info Diagnostics - // Discovery Services - // - Discovery Register (be able to call RegisterServer) - // - Discovery Register2 (be able to call RegisterServer2) - // Session Services - // - Session Change User - Support use of ActivateSession to 
change the Session user - // - Session Cancel - Support the Cancel Service to cancel outstanding requests - // - Session Minimum 50 Parallel - Support minimum 50 parallel Sessions - // - // "http://opcfoundation.org/UA-Profile/Server/StandardUA", - ]; - let _ = v.set_value_direct( - Variant::from((VariantTypeId::String, &server_profiles[..])), - StatusCode::Good, - &now, - &now, - ); - } + fn size_hint(&self) -> (usize, Option) { + let mut lower = 0; + let mut upper = None; + if let Some(iter_s) = &self.iter_s { + let (lower_i, upper_i) = iter_s.size_hint(); + lower = lower_i; + upper = upper_i; + } - // Server_ServerDiagnostics_ServerDiagnosticsSummary - // Server_ServerDiagnostics_SamplingIntervalDiagnosticsArray - // Server_ServerDiagnostics_SubscriptionDiagnosticsArray - // Server_ServerDiagnostics_EnabledFlag - { - let server_state = trace_read_lock!(server_state); - self.server_diagnostics = Some(server_state.diagnostics.clone()); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_ServerViewCount, - server_view_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_CurrentSessionCount, - current_session_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_CumulatedSessionCount, - cumulated_session_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_SecurityRejectedSessionCount, - security_rejected_session_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_SessionTimeoutCount, - session_timeout_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_SessionAbortCount, - session_abort_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_RejectedSessionCount, - rejected_session_count - ); - server_diagnostics_summary!( - self, - 
Server_ServerDiagnostics_ServerDiagnosticsSummary_PublishingIntervalCount, - publishing_interval_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_CurrentSubscriptionCount, - current_subscription_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_CumulatedSubscriptionCount, - cumulated_subscription_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_SecurityRejectedRequestsCount, - security_rejected_requests_count - ); - server_diagnostics_summary!( - self, - Server_ServerDiagnostics_ServerDiagnosticsSummary_RejectedRequestsCount, - rejected_requests_count - ); + if let Some(iter_t) = &self.iter_s { + let (lower_i, upper_i) = iter_t.size_hint(); + lower += lower_i; + upper = match (upper, upper_i) { + (Some(l), Some(r)) => Some(l + r), + _ => None, } + } - // ServiceLevel - 0-255 worst to best quality of service - self.set_service_level(255u8, &now); - - // Auditing - var - // ServerDiagnostics - // VendorServiceInfo - // ServerRedundancy - - // Server_ServerStatus_StartTime - self.set_variable_value(Server_ServerStatus_StartTime, now, &now, &now); - - // Server_ServerStatus_CurrentTime - self.set_variable_getter( - Server_ServerStatus_CurrentTime, - move |_, timestamps_to_return, _, _, _, _| { - let now = DateTime::now(); - let mut value = DataValue::from(now); - value.set_timestamps(timestamps_to_return, now, now); - Ok(Some(value)) - }, - ); + (lower, upper) + } +} - // State OPC UA Part 5 12.6, Valid states are - // State (Server_ServerStatus_State) - self.set_variable_getter( - Server_ServerStatus_State, - move |_, timestamps_to_return, _, _, _, _| { - // let server_state = trace_read_lock!(server_state); - let now = DateTime::now(); - let mut value = DataValue::from(0i32); - value.set_timestamps(timestamps_to_return, now, now); - Ok(Some(value)) - }, - ); +impl<'a, 'b> ReferenceIterator<'a, 'b> { + pub fn 
new( + source_node: &'b NodeId, + direction: BrowseDirection, + references: &'a References, + filter: Option<(NodeId, bool)>, + type_tree: &'b TypeTree, + ) -> Self { + Self { + filter, + type_tree, + iter_s: matches!(direction, BrowseDirection::Both | BrowseDirection::Forward) + .then(|| references.by_source.get(source_node)) + .flatten() + .map(|r| r.iter()), + iter_t: matches!(direction, BrowseDirection::Both | BrowseDirection::Inverse) + .then(|| references.by_target.get(source_node)) + .flatten() + .map(|r| r.iter()), + } + } - // ServerStatus_BuildInfo - { - // BuildDate - // BuildNumber - // ManufacturerName - // ProductName - // ProductUri - // SoftwareVersion + fn next_inner(&mut self) -> Option> { + if let Some(iter_s) = &mut self.iter_s { + match iter_s.next() { + Some(r) => { + return Some(ReferenceRef { + reference_type: &r.reference_type, + target_node: &r.target_node, + direction: ReferenceDirection::Forward, + }) + } + None => self.iter_s = None, } + } - // Server method handlers - use crate::server::address_space::method_impls; - self.register_method_handler( - MethodId::Server_ResendData, - Box::new(method_impls::ServerResendDataMethod), - ); - self.register_method_handler( - MethodId::Server_GetMonitoredItems, - Box::new(method_impls::ServerGetMonitoredItemsMethod), - ); + if let Some(iter_t) = &mut self.iter_t { + match iter_t.next() { + Some(r) => { + return Some(ReferenceRef { + reference_type: &r.reference_type, + target_node: &r.target_node, + direction: ReferenceDirection::Inverse, + }) + } + None => self.iter_t = None, + } } - } - /// Sets the history server capabilities based on the supplied flags - pub fn set_history_server_capabilities(&mut self, capabilities: &HistoryServerCapabilities) { - let now = DateTime::now(); - self.set_variable_value( - HistoryServerCapabilities_AccessHistoryDataCapability, - capabilities.access_history_data, - &now, - &now, - ); - self.set_variable_value( - 
HistoryServerCapabilities_AccessHistoryEventsCapability, - capabilities.access_history_events, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_MaxReturnDataValues, - capabilities.max_return_data, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_MaxReturnEventValues, - capabilities.max_return_events, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_InsertDataCapability, - capabilities.insert_data, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_ReplaceDataCapability, - capabilities.replace_data, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_UpdateDataCapability, - capabilities.update_data, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_DeleteRawCapability, - capabilities.delete_raw, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_DeleteAtTimeCapability, - capabilities.delete_at_time, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_InsertEventCapability, - capabilities.insert_event, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_ReplaceEventCapability, - capabilities.replace_event, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_UpdateEventCapability, - capabilities.update_event, - &now, - &now, - ); - self.set_variable_value( - HistoryServerCapabilities_InsertAnnotationCapability, - capabilities.insert_annotation, - &now, - &now, - ); + None } +} - /// Returns the root folder - pub fn root_folder(&self) -> &Object { - expect_and_find_object!(self, &NodeId::root_folder_id()) - } +/// Represents an in-memory address space. 
+pub struct AddressSpace { + node_map: HashMap, + namespaces: HashMap, + references: References, +} - /// Returns the objects folder - pub fn objects_folder(&self) -> &Object { - expect_and_find_object!(self, &NodeId::objects_folder_id()) +impl AddressSpace { + pub fn new() -> Self { + Self { + node_map: HashMap::new(), + namespaces: HashMap::new(), + references: References::new(), + } } - /// Returns the types folder - pub fn types_folder(&self) -> &Object { - expect_and_find_object!(self, &NodeId::types_folder_id()) - } + pub fn load_into_type_tree(&self, type_tree: &mut TypeTree) { + let mut found_ids = VecDeque::new(); + // Populate types first so that we have reference types to browse in the next stage. + for node in self.node_map.values() { + let nc = node.node_class(); + if !matches!( + nc, + NodeClass::DataType + | NodeClass::ObjectType + | NodeClass::VariableType + | NodeClass::ReferenceType + ) { + continue; + } - /// Returns the views folder - pub fn views_folder(&self) -> &Object { - expect_and_find_object!(self, &NodeId::views_folder_id()) - } + let node_id = node.node_id(); - fn assert_namespace(&self, node_id: &NodeId) { - if node_id.namespace as usize > self.namespaces.len() { - panic!("Namespace index {} does not exist", node_id.namespace); + let parent = self.references.by_target.get(node_id).and_then(|refs| { + refs.iter() + .find(|r| &r.reference_type == &ReferenceTypeId::HasSubtype.into()) + }); + // If a node somehow lacks a super-type, insert it as a child of the relevant base type. 
+ let parent_id = if let Some(parent) = parent { + parent.target_node.clone() + } else { + continue; + }; + + type_tree.add_type_node(&node_id, &parent_id, nc); + found_ids.push_back((node_id.clone(), node_id, Vec::new(), nc)); } - } - /// Sets the default namespace - pub fn set_default_namespace(&mut self, default_namespace: u16) { - self.default_namespace = default_namespace; - } + // Recursively browse each discovered type for non-type children + while let Some((node, root_type, path, node_class)) = found_ids.pop_front() { + for child in self.find_references( + &node, + Some((ReferenceTypeId::HierarchicalReferences, true)), + type_tree, + BrowseDirection::Forward, + ) { + if child + .reference_type + .as_reference_type_id() + .is_ok_and(|r| r == ReferenceTypeId::HasSubtype) + { + continue; + } + let Some(node_type) = self.node_map.get(child.target_node) else { + continue; + }; + + let nc = node_type.node_class(); + + if matches!( + nc, + NodeClass::DataType + | NodeClass::ObjectType + | NodeClass::VariableType + | NodeClass::ReferenceType + ) { + continue; + } + let mut path = path.clone(); + path.push(node_type.as_node().browse_name()); - /// Gets the default namespace - pub fn default_namespace(&self) -> u16 { - self.default_namespace - } + found_ids.push_back((child.target_node.clone(), root_type, path, nc)); + } - /// Get the default namespace for audit events - pub fn audit_namespace(&self) -> u16 { - self.audit_namespace + if !path.is_empty() { + type_tree.add_type_property(&node, &root_type, &path, node_class); + } + } } - /// Get the internal namespace - pub fn internal_namespace(&self) -> u16 { - self.internal_namespace + pub fn add_namespace(&mut self, namespace: &str, index: u16) { + self.namespaces.insert(index, namespace.to_string()); } - /// Inserts a node into the address space node map and its references to other target nodes. 
- /// The tuple of references is the target node id, reference type id and a bool which is false for - /// a forward reference and indicating inverse - pub fn insert( + pub fn insert<'a, T, S>( &mut self, node: T, - references: Option<&[(&NodeId, &S, ReferenceDirection)]>, + references: Option<&'a [(&'a NodeId, &S, ReferenceDirection)]>, ) -> bool where T: Into, S: Into + Clone, { let node_type = node.into(); - let node_id = node_type.node_id(); + let node_id = node_type.node_id().clone(); self.assert_namespace(&node_id); @@ -787,163 +461,138 @@ impl AddressSpace { error!("This node {} already exists", node_id); false } else { - self.node_map.insert(node_id.clone(), node_type); // If references are supplied, add them now if let Some(references) = references { - self.references.insert(&node_id, references); + self.references.insert::(&node_id, references); } - self.update_last_modified(); + self.node_map.insert(node_id, node_type); + true } } - /// Adds the standard nodeset to the address space - pub fn add_default_nodes(&mut self) { - debug!("populating address space"); - - #[cfg(feature = "generated-address-space")] - { - // Reserve space in the maps. The default node set contains just under 2000 values for - // nodes, references and inverse references. 
- self.node_map.reserve(2000); - // Run the generated code that will populate the address space with the default nodes - super::generated::populate_address_space(self); + pub fn namespace_index(&self, namespace: &str) -> Option { + self.namespaces + .iter() + .find(|(_, ns)| namespace == ns.as_str()) + .map(|(i, _)| *i) + } + + fn assert_namespace(&self, node_id: &NodeId) { + if !self.namespaces.contains_key(&node_id.namespace) { + panic!("Namespace index {} not in address space", node_id.namespace); } } - // Inserts a bunch of references between two nodes into the address space - pub fn insert_references(&mut self, references: &[(&NodeId, &NodeId, &T)]) - where - T: Into + Clone, - { - self.references.insert_references(references); - self.update_last_modified(); + pub fn node_exists(&self, node_id: &NodeId) -> bool { + self.node_map.contains_key(node_id) } - /// Inserts a single reference between two nodes in the address space - pub fn insert_reference( + pub fn insert_reference( &mut self, - node_id: &NodeId, - target_node_id: &NodeId, - reference_type_id: T, - ) where - T: Into + Clone, - { + source_node: &NodeId, + target_node: &NodeId, + reference_type: impl Into, + ) { self.references - .insert_reference(node_id, target_node_id, &reference_type_id); - self.update_last_modified(); + .insert_reference(source_node, target_node, reference_type) } - pub fn set_node_type(&mut self, node_id: &NodeId, node_type: T) - where - T: Into, - { - self.insert_reference( - node_id, - &node_type.into(), - ReferenceTypeId::HasTypeDefinition, - ); + pub fn insert_references<'a>( + &mut self, + references: impl Iterator)>, + ) { + self.references.insert_references(references) } - pub fn node_exists(&self, node_id: &NodeId) -> bool { - self.node_map.contains_key(node_id) + pub fn delete_reference( + &mut self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: impl Into, + ) -> bool { + self.references + .delete_reference(source_node, target_node, reference_type) } 
- /// Adds a folder with a specified id - pub fn add_folder_with_id( - &mut self, - node_id: &NodeId, - browse_name: R, - display_name: S, - parent_node_id: &NodeId, - ) -> bool - where - R: Into, - S: Into, - { - self.assert_namespace(node_id); - ObjectBuilder::new(node_id, browse_name, display_name) - .is_folder() - .organized_by(parent_node_id.clone()) - .insert(self) + pub fn delete_node_references(&mut self, source_node: &NodeId) -> bool { + self.references.delete_node_references(source_node) } - /// Adds a folder using a generated node id - pub fn add_folder( - &mut self, - browse_name: R, - display_name: S, - parent_node_id: &NodeId, - ) -> Result - where - R: Into, - S: Into, - { - let node_id = NodeId::next_numeric(self.default_namespace); - self.assert_namespace(&node_id); - if self.add_folder_with_id(&node_id, browse_name, display_name, parent_node_id) { - Ok(node_id) - } else { - Err(()) - } + pub fn has_reference( + &self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: impl Into, + ) -> bool { + self.references + .has_reference(source_node, target_node, reference_type) } - /// Adds a list of variables to the specified parent node - pub fn add_variables( - &mut self, - variables: Vec, - parent_node_id: &NodeId, - ) -> Vec { - let result = variables - .into_iter() - .map(|v| { - self.insert( - v, - Some(&[( - parent_node_id, - &ReferenceTypeId::Organizes, - ReferenceDirection::Inverse, - )]), - ) - }) - .collect(); - self.update_last_modified(); - result + pub fn find_references<'a: 'b, 'b>( + &'a self, + source_node: &'b NodeId, + filter: Option<(impl Into, bool)>, + type_tree: &'b TypeTree, + direction: BrowseDirection, + ) -> impl Iterator> + 'b { + self.references + .find_references(source_node, filter, type_tree, direction) } - /// Deletes a node by its node id, and all of its properties and optionally any references to or from it it in the - /// address space. 
- pub fn delete(&mut self, node_id: &NodeId, delete_target_references: bool) -> bool { - // Delete any children recursively - if let Some(child_nodes) = self.find_aggregates_of(node_id) { - child_nodes.into_iter().for_each(|node_id| { - debug!("Deleting child node {}", node_id); - let _ = self.delete(&node_id, delete_target_references); - }); + pub fn find_node_by_browse_name<'a: 'b, 'b>( + &'a self, + source_node: &'b NodeId, + filter: Option<(impl Into, bool)>, + type_tree: &'b TypeTree, + direction: BrowseDirection, + browse_name: impl Into, + ) -> Option<&'a NodeType> { + let name = browse_name.into(); + for rf in self.find_references(source_node, filter, type_tree, direction) { + let node = self.find_node(rf.target_node); + if let Some(node) = node { + if node.as_node().browse_name() == &name { + return Some(node); + } + } } - // Remove the node - let removed_node = self.node_map.remove(node_id); - // Remove references - let removed_target_references = if delete_target_references { - self.references.delete_node_references(node_id) - } else { - false + None + } + + pub fn find_node_by_browse_path<'a: 'b, 'b>( + &'a self, + source_node: &'b NodeId, + filter: Option<(impl Into, bool)>, + type_tree: &'b TypeTree, + direction: BrowseDirection, + browse_path: &[QualifiedName], + ) -> Option<&'a NodeType> { + let Some(mut node) = self.find_node(&source_node) else { + return None; }; - removed_node.is_some() || removed_target_references + let filter: Option<(NodeId, bool)> = filter.map(|(id, c)| (id.into(), c)); + for path_elem in browse_path { + let mut found = false; + for rf in self.find_references(node.node_id(), filter.clone(), type_tree, direction) { + let child = self.find_node(rf.target_node); + if let Some(child) = child { + if child.as_node().browse_name() == path_elem { + node = child; + found = true; + break; + } + } + } + if !found { + return None; + } + } + Some(node) } - /// Finds the matching reference and deletes it - pub fn delete_reference( - &mut 
self, - node_id: &NodeId, - target_node_id: &NodeId, - reference_type_id: T, - ) -> bool - where - T: Into, - { - self.references - .delete_reference(node_id, target_node_id, reference_type_id) + pub fn namespaces(&self) -> &HashMap { + &self.namespaces } /// Find node by something that can be turned into a node id and return a reference to it. @@ -972,415 +621,889 @@ impl AddressSpace { self.node_map.get_mut(node_id) } - /// Find and return a variable with the specified node id or return None if it cannot be - /// found or is not a variable - pub fn find_variable(&self, node_id: N) -> Option<&Variable> - where - N: Into, - { - self.find_variable_by_ref(&node_id.into()) - } + pub fn validate_node_read<'a>( + &'a self, + context: &RequestContext, + node_to_read: &ParsedReadValueId, + ) -> Result<&'a NodeType, StatusCode> { + let Some(node) = self.find(&node_to_read.node_id) else { + debug!( + "read_node_value result for read node id {}, attribute {:?} cannot find node", + node_to_read.node_id, node_to_read.attribute_id + ); + return Err(StatusCode::BadNodeIdUnknown); + }; + + validate_node_read(node, context, node_to_read)?; - /// Find and return a variable with the specified node id or return None if it cannot be - /// found or is not a variable - pub fn find_variable_by_ref(&self, node_id: &NodeId) -> Option<&Variable> { - find_node!(self, node_id, Variable) + Ok(node) } - /// Find and return a variable with the specified node id or return None if it cannot be - /// found or is not a variable - pub fn find_variable_mut(&mut self, node_id: N) -> Option<&mut Variable> - where - N: Into, - { - self.find_variable_mut_by_ref(&node_id.into()) + pub fn read( + &self, + context: &RequestContext, + node_to_read: &ParsedReadValueId, + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> DataValue { + let node = match self.validate_node_read(context, node_to_read) { + Ok(n) => n, + Err(e) => { + return DataValue { + status: Some(e), + ..Default::default() + }; + 
} + }; + + read_node_value(node, context, node_to_read, max_age, timestamps_to_return) } - /// Find and return a variable with the specified node id or return None if it cannot be - /// found or is not a variable - pub fn find_variable_mut_by_ref(&mut self, node_id: &NodeId) -> Option<&mut Variable> { - find_node_mut!(self, node_id, Variable) + pub fn validate_node_write<'a>( + &'a mut self, + context: &RequestContext, + node_to_write: &ParsedWriteValue, + type_tree: &TypeTree, + ) -> Result<&'a mut NodeType, StatusCode> { + let Some(node) = self.find_mut(&node_to_write.node_id) else { + debug!( + "write_node_value result for read node id {}, attribute {:?} cannot find node", + node_to_write.node_id, node_to_write.attribute_id + ); + return Err(StatusCode::BadNodeIdUnknown); + }; + + validate_node_write(node, context, node_to_write, type_tree)?; + + Ok(node) } - /// Set a variable value from its NodeId. The function will return false if the variable does - /// not exist, or the node is not a variable. - pub fn set_variable_value( - &mut self, - node_id: N, - value: V, - source_timestamp: &DateTime, - server_timestamp: &DateTime, - ) -> bool - where - N: Into, - V: Into, - { - self.set_variable_value_by_ref(&node_id.into(), value, source_timestamp, server_timestamp) + pub fn delete(&mut self, node_id: &NodeId, delete_target_references: bool) -> Option { + let n = self.node_map.remove(node_id); + let source = self.references.by_source.remove(node_id); + // Remove any outgoing references + if delete_target_references { + for rf in source.into_iter().flatten() { + if let Some(rec) = self.references.by_target.get_mut(&rf.target_node) { + rec.remove(&ReferenceKey { + reference_type: &rf.reference_type, + target_node: node_id, + }); + } + } + } + + let target = self.references.by_target.remove(node_id); + + // Optionally remove forwards references pointing at this node. 
+ if delete_target_references { + for rf in target.into_iter().flatten() { + if let Some(rec) = self.references.by_source.get_mut(&rf.target_node) { + rec.remove(&ReferenceKey { + reference_type: &rf.reference_type, + target_node: node_id, + }); + } + } + } + + n } - /// Set a variable value from its NodeId. The function will return false if the variable does - /// not exist, or the node is not a variable. - pub fn set_variable_value_by_ref( + pub fn add_folder( &mut self, node_id: &NodeId, - value: V, - source_timestamp: &DateTime, - server_timestamp: &DateTime, - ) -> bool - where - V: Into, - { - if let Some(ref mut variable) = self.find_variable_mut_by_ref(node_id) { - let _ = variable.set_value_direct( - value, - StatusCode::Good, - source_timestamp, - server_timestamp, - ); - true - } else { - false - } + browse_name: impl Into, + display_name: impl Into, + parent_node_id: &NodeId, + ) -> bool { + self.assert_namespace(node_id); + ObjectBuilder::new(node_id, browse_name, display_name) + .is_folder() + .organized_by(parent_node_id.clone()) + .insert(self) } - /// Gets a variable value with the supplied NodeId. The function will return Err if the - /// NodeId does not exist or is not a variable. 
- pub fn get_variable_value(&self, node_id: N) -> Result - where - N: Into, - { - self.find_variable(node_id) - .map(|variable| { - variable.value( - TimestampsToReturn::Neither, - NumericRange::None, - &QualifiedName::null(), - 0.0, + pub fn add_variables( + &mut self, + variables: Vec, + parent_node_id: &NodeId, + ) -> Vec { + variables + .into_iter() + .map(|v| { + self.insert( + v, + Some(&[( + parent_node_id, + &ReferenceTypeId::Organizes, + ReferenceDirection::Inverse, + )]), ) }) - .ok_or_else(|| ()) + .collect() } +} - /// Registers a method callback on the specified object id and method id - pub fn register_method_handler(&mut self, method_id: N, handler: MethodCallback) - where - N: Into, - { - // Check the object id and method id actually exist as things in the address space - let method_id = method_id.into(); - if let Some(method) = self.find_mut(&method_id) { - match method { - NodeType::Method(method) => method.set_callback(handler), - _ => panic!("{} is not a method node", method_id), - } - } else { - panic!("{} method id does not exist", method_id); - } +#[cfg(test)] +mod tests { + use crate::{ + server::{ + address_space::{ + types::{NodeBase, Object, Variable}, + EventNotifier, MethodBuilder, NodeType, ObjectBuilder, ObjectTypeBuilder, + VariableBuilder, + }, + node_manager::TypeTree, + }, + types::{ + argument::Argument, Array, BrowseDirection, DataTypeId, DecodingOptions, LocalizedText, + NodeClass, NodeId, NumericRange, ObjectId, ObjectTypeId, QualifiedName, + ReferenceTypeId, TimestampsToReturn, UAString, Variant, VariantTypeId, + }, + }; + + use super::AddressSpace; + + fn make_sample_address_space() -> AddressSpace { + let mut address_space = AddressSpace::new(); + address_space.add_namespace("http://opcfoundation.org/UA/", 0); + crate::server::address_space::populate_address_space(&mut address_space); + add_sample_vars_to_address_space(&mut address_space); + address_space } - /// Test if the type definition is defined and valid for a class 
of the specified type. - /// i.e. if we have a Variable or Object class that the type is a VariableType or ObjectType - /// respectively. - pub fn is_valid_type_definition( - &self, - node_class: NodeClass, - type_definition: &NodeId, - ) -> bool { - match node_class { - NodeClass::Object => { - if type_definition.is_null() { - false - } else if let Some(NodeType::ObjectType(_)) = self.find_node(type_definition) { - true - } else { - false - } - } - NodeClass::Variable => { - if type_definition.is_null() { - false - } else if let Some(NodeType::VariableType(_)) = self.find_node(type_definition) { - true - } else { - false - } - } - _ => { - // Other node classes must NOT supply a type definition - type_definition.is_null() - } + fn add_sample_vars_to_address_space(address_space: &mut AddressSpace) { + address_space.add_namespace("urn:test", 1); + let ns = 1; + + // Create a sample folder under objects folder + let sample_folder_id = NodeId::next_numeric(ns); + ObjectBuilder::new(&sample_folder_id, "Sample", "Sample") + .organized_by(ObjectId::ObjectsFolder) + .insert(address_space); + + // Add some variables to our sample folder + let vars = vec![ + Variable::new(&NodeId::new(ns, "v1"), "v1", "v1", 30i32), + Variable::new(&NodeId::new(ns, 300), "v2", "v2", true), + Variable::new(&NodeId::new(ns, "v3"), "v3", "v3", "Hello world"), + Variable::new(&NodeId::new(ns, "v4"), "v4", "v4", 100.123f64), + ]; + for var in vars { + let node_id = var.node_id().clone(); + address_space.insert::<_, NodeId>(var, None); + address_space.insert_reference( + &sample_folder_id, + &node_id, + ReferenceTypeId::HasComponent, + ); } } - /// This finds the type definition (if any corresponding to the input object) - fn get_type_id(&self, node_id: &NodeId) -> Option { - self.references.get_type_id(node_id) + #[test] + fn find_root_folder() { + let address_space = make_sample_address_space(); + let node_type = address_space.find_node(&NodeId::new(0, 84)); + assert!(node_type.is_some()); + + 
let node = node_type.unwrap().as_node(); + assert_eq!(node.node_id(), &NodeId::new(0, 84)); + assert_eq!(node.node_id(), &ObjectId::RootFolder.into()); } - /// Test if a reference relationship exists between one node and another node - pub fn has_reference( - &self, - source_node: &NodeId, - target_node: &NodeId, - reference_type: T, - ) -> bool - where - T: Into, - { - self.references - .has_reference(source_node, target_node, reference_type) + #[test] + fn find_objects_folder() { + let address_space = make_sample_address_space(); + let node_type = address_space.find(ObjectId::ObjectsFolder); + assert!(node_type.is_some()); } - /// Tests if a method exists on a specific object. This will be true if the method id is - /// a HasComponent of the object itself, or a HasComponent of the object type - fn method_exists_on_object(&self, object_id: &NodeId, method_id: &NodeId) -> bool { - // Look for the method first on the object id, else on the object's type - if self.has_reference(object_id, method_id, ReferenceTypeId::HasComponent) { - true - } else if let Some(object_type_id) = self.get_type_id(object_id) { - self.has_reference(&object_type_id, method_id, ReferenceTypeId::HasComponent) - } else { - error!("Method call to {:?} on {:?} but the method id is not on the object or its object type!", method_id, object_id); - false - } + #[test] + fn find_types_folder() { + let address_space = make_sample_address_space(); + let node_type = address_space.find(ObjectId::TypesFolder); + assert!(node_type.is_some()); } - /// Calls a method node with the supplied request and expecting a result. - /// - /// Calls require a registered handler to handle the method. If there is no handler, or if - /// the request refers to a non existent object / method, the function will return an error. 
- pub fn call_method( - &mut self, - _server_state: &ServerState, - session_id: &NodeId, - session_manager: Arc>, - request: &CallMethodRequest, - ) -> Result { - let (object_id, method_id) = (&request.object_id, &request.method_id); - // Handle the call - if !is_object!(self, object_id) { - error!( - "Method call to {:?} on {:?} but the node id is not recognized!", - method_id, object_id - ); - Err(StatusCode::BadNodeIdUnknown) - } else if !is_method!(self, method_id) { - error!( - "Method call to {:?} on {:?} but the method id is not recognized!", - method_id, object_id - ); - Err(StatusCode::BadMethodInvalid) - } else if !self.method_exists_on_object(object_id, method_id) { - error!( - "Method call to {:?} on {:?} but the method does not exist on the object!", - method_id, object_id - ); - Err(StatusCode::BadMethodInvalid) - } else if let Some(method) = self.find_mut(method_id) { - // TODO check security - session / user may not have permission to call methods - match method { - NodeType::Method(method) => method.call(session_id, session_manager, request), - _ => Err(StatusCode::BadMethodInvalid), - } - } else { - Err(StatusCode::BadMethodInvalid) - } + #[test] + fn find_views_folder() { + let address_space = make_sample_address_space(); + let node_type = address_space.find(ObjectId::ViewsFolder); + assert!(node_type.is_some()); } - /// Recursive function tries to find if a type is a subtype of another type by looking at its - /// references. Function will positively match a type against itself. 
- pub fn is_subtype(&self, subtype_id: &NodeId, base_type_id: &NodeId) -> bool { - subtype_id == base_type_id || { - // Apply same test to all children of the base type - if let Some(references) = - self.find_references(base_type_id, Some((ReferenceTypeId::HasSubtype, false))) - { - // Each child will test if it is the parent / match for the subtype - references - .iter() - .any(|r| self.is_subtype(subtype_id, &r.target_node)) - } else { - false - } + #[test] + fn find_common_nodes() { + let address_space = make_sample_address_space(); + let nodes: Vec = vec![ + ObjectId::RootFolder.into(), + ObjectId::ObjectsFolder.into(), + ObjectId::TypesFolder.into(), + ObjectId::ViewsFolder.into(), + ObjectId::DataTypesFolder.into(), + DataTypeId::BaseDataType.into(), + // Types + DataTypeId::Boolean.into(), + DataTypeId::ByteString.into(), + DataTypeId::DataValue.into(), + DataTypeId::DateTime.into(), + DataTypeId::DiagnosticInfo.into(), + DataTypeId::Enumeration.into(), + DataTypeId::ExpandedNodeId.into(), + DataTypeId::Guid.into(), + DataTypeId::LocalizedText.into(), + DataTypeId::NodeId.into(), + DataTypeId::Number.into(), + DataTypeId::QualifiedName.into(), + DataTypeId::StatusCode.into(), + DataTypeId::String.into(), + DataTypeId::Structure.into(), + DataTypeId::XmlElement.into(), + DataTypeId::Double.into(), + DataTypeId::Float.into(), + DataTypeId::Integer.into(), + DataTypeId::SByte.into(), + DataTypeId::Int16.into(), + DataTypeId::Int32.into(), + DataTypeId::Int64.into(), + DataTypeId::Byte.into(), + DataTypeId::UInt16.into(), + DataTypeId::UInt32.into(), + DataTypeId::UInt64.into(), + ObjectId::OPCBinarySchema_TypeSystem.into(), + ObjectTypeId::DataTypeSystemType.into(), + // Refs + ObjectId::ReferenceTypesFolder.into(), + ReferenceTypeId::References.into(), + ReferenceTypeId::HierarchicalReferences.into(), + ReferenceTypeId::HasChild.into(), + ReferenceTypeId::HasSubtype.into(), + ReferenceTypeId::Organizes.into(), + 
ReferenceTypeId::NonHierarchicalReferences.into(), + ReferenceTypeId::HasTypeDefinition.into(), + ]; + for n in nodes { + assert!(address_space.find_node(&n).is_some()); } } - /// Finds objects by a specified type. - fn find_nodes_by_type( - &self, - node_type_class: NodeClass, - node_type_id: T, - include_subtypes: bool, - ) -> Option> - where - T: Into, - { - let node_type_id = node_type_id.into(); - // Ensure the node type is of the right class - if let Some(node) = self.node_map.get(&node_type_id) { - if node.node_class() == node_type_class { - // Find nodes with a matching type definition - let nodes = self - .node_map - .iter() - .filter(|(_, v)| v.node_class() == NodeClass::Object) - .filter(move |(k, _)| { - // Node has to have a type definition reference to the type - if let Some(type_refs) = self - .find_references(k, Some((ReferenceTypeId::HasTypeDefinition, false))) - { - // Type definition must find the sought after type - type_refs.iter().any(|r| { - include_subtypes && self.is_subtype(&node_type_id, &r.target_node) - || r.target_node == node_type_id - }) - } else { - false - } - }) - .map(|(k, _)| k.clone()) - .collect::>(); - if nodes.is_empty() { - None - } else { - Some(nodes) - } - } else { - debug!("Cannot find nodes by type because node type id {:?} is not a matching class {:?}", node_type_id, node_type_class); - None - } - } else { - debug!( - "Cannot find nodes by type because node type id {:?} does not exist", - node_type_id - ); - None - } + + #[test] + fn object_attributes() { + let on = NodeId::new(1, "o1"); + let o = Object::new(&on, "Browse01", "Display01", EventNotifier::empty()); + assert_eq!(o.node_class(), NodeClass::Object); + assert_eq!(o.node_id(), &on); + assert_eq!(o.browse_name(), &QualifiedName::new(0, "Browse01")); + assert_eq!(o.display_name(), &"Display01".into()); } - pub fn find_objects_by_type( - &self, - object_type: T, - include_subtypes: bool, - ) -> Option> - where - T: Into, - { - 
self.find_nodes_by_type(NodeClass::ObjectType, object_type, include_subtypes) + #[test] + fn find_node_by_id() { + let address_space = make_sample_address_space(); + let ns = 1; + + assert!(!address_space.node_exists(&NodeId::null())); + assert!(!address_space.node_exists(&NodeId::new(11, "v3"))); + + assert!(address_space.node_exists(&NodeId::new(ns, "v1"))); + assert!(address_space.node_exists(&NodeId::new(ns, 300))); + assert!(address_space.node_exists(&NodeId::new(ns, "v3"))); } - pub fn find_variables_by_type( - &self, - variable_type: T, - include_subtypes: bool, - ) -> Option> - where - T: Into, - { - self.find_nodes_by_type(NodeClass::VariableType, variable_type, include_subtypes) + #[test] + fn find_references() { + let address_space = make_sample_address_space(); + + let references: Vec<_> = address_space + .find_references( + &NodeId::root_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + &TypeTree::new(), + BrowseDirection::Forward, + ) + .collect(); + assert_eq!(references.len(), 3); + + let references: Vec<_> = address_space + .find_references( + &NodeId::root_folder_id(), + None::<(NodeId, bool)>, + &TypeTree::new(), + BrowseDirection::Forward, + ) + .collect(); + assert_eq!(references.len(), 4); + + let references: Vec<_> = address_space + .find_references( + &NodeId::objects_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + &TypeTree::new(), + BrowseDirection::Forward, + ) + .collect(); + assert_eq!(references.len(), 2); + + let r1 = &references[0]; + assert_eq!(r1.reference_type, &ReferenceTypeId::Organizes.into()); + let child_node_id = r1.target_node.clone(); + + let child = address_space.find_node(&child_node_id); + assert!(child.is_some()); } - /// Finds all child propertiesof the parent node. i.e. 
Aggregates or any subtype - pub fn find_aggregates_of(&self, parent_node: &NodeId) -> Option> { - self.find_references(parent_node, Some((ReferenceTypeId::Aggregates, true))) - .map(|references| { - references - .iter() - .map(|r| { - // debug!("reference {:?}", r); - r.target_node.clone() - }) - .collect() - }) + #[test] + fn find_inverse_references() { + let address_space = make_sample_address_space(); + + //println!("{:#?}", address_space); + let references: Vec<_> = address_space + .find_references( + &NodeId::root_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + &TypeTree::new(), + BrowseDirection::Inverse, + ) + .collect(); + assert!(references.is_empty()); + + let references: Vec<_> = address_space + .find_references( + &NodeId::objects_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + &TypeTree::new(), + BrowseDirection::Inverse, + ) + .collect(); + assert_eq!(references.len(), 1); } - /// Finds hierarchical references of the parent node, i.e. children, event sources, organizes etc from the parent node to other nodes. - /// This function will return node ids even if the nodes themselves do not exist in the address space. 
- pub fn find_hierarchical_references(&self, parent_node: &NodeId) -> Option> { - self.find_references( - parent_node, - Some((ReferenceTypeId::HierarchicalReferences, true)), - ) - .map(|references| { - references - .iter() - .map(|r| { - // debug!("reference {:?}", r); - r.target_node.clone() - }) - .collect() - }) + #[test] + fn find_reference_subtypes() { + let address_space = make_sample_address_space(); + let mut type_tree = TypeTree::new(); + address_space.load_into_type_tree(&mut type_tree); + + let reference_types = vec![ + ( + ReferenceTypeId::References, + ReferenceTypeId::HierarchicalReferences, + ), + (ReferenceTypeId::References, ReferenceTypeId::HasChild), + (ReferenceTypeId::References, ReferenceTypeId::HasSubtype), + (ReferenceTypeId::References, ReferenceTypeId::Organizes), + (ReferenceTypeId::References, ReferenceTypeId::Aggregates), + (ReferenceTypeId::References, ReferenceTypeId::HasProperty), + (ReferenceTypeId::References, ReferenceTypeId::HasComponent), + ( + ReferenceTypeId::References, + ReferenceTypeId::HasOrderedComponent, + ), + (ReferenceTypeId::References, ReferenceTypeId::HasEventSource), + (ReferenceTypeId::References, ReferenceTypeId::HasNotifier), + (ReferenceTypeId::References, ReferenceTypeId::GeneratesEvent), + ( + ReferenceTypeId::References, + ReferenceTypeId::AlwaysGeneratesEvent, + ), + (ReferenceTypeId::References, ReferenceTypeId::HasEncoding), + ( + ReferenceTypeId::References, + ReferenceTypeId::HasModellingRule, + ), + (ReferenceTypeId::References, ReferenceTypeId::HasDescription), + ( + ReferenceTypeId::References, + ReferenceTypeId::HasTypeDefinition, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasChild, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasSubtype, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::Organizes, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::Aggregates, + ), + ( + 
ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasProperty, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasComponent, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasOrderedComponent, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasEventSource, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasNotifier, + ), + (ReferenceTypeId::HasChild, ReferenceTypeId::Aggregates), + (ReferenceTypeId::HasChild, ReferenceTypeId::HasComponent), + ( + ReferenceTypeId::HasChild, + ReferenceTypeId::HasHistoricalConfiguration, + ), + (ReferenceTypeId::HasChild, ReferenceTypeId::HasProperty), + ( + ReferenceTypeId::HasChild, + ReferenceTypeId::HasOrderedComponent, + ), + (ReferenceTypeId::HasChild, ReferenceTypeId::HasSubtype), + (ReferenceTypeId::Aggregates, ReferenceTypeId::HasComponent), + ( + ReferenceTypeId::Aggregates, + ReferenceTypeId::HasHistoricalConfiguration, + ), + (ReferenceTypeId::Aggregates, ReferenceTypeId::HasProperty), + ( + ReferenceTypeId::Aggregates, + ReferenceTypeId::HasOrderedComponent, + ), + ( + ReferenceTypeId::HasComponent, + ReferenceTypeId::HasOrderedComponent, + ), + ( + ReferenceTypeId::HasEventSource, + ReferenceTypeId::HasNotifier, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasNotifier, + ), + ( + ReferenceTypeId::References, + ReferenceTypeId::NonHierarchicalReferences, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::GeneratesEvent, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::AlwaysGeneratesEvent, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasEncoding, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasModellingRule, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasDescription, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasTypeDefinition, + 
), + ( + ReferenceTypeId::GeneratesEvent, + ReferenceTypeId::AlwaysGeneratesEvent, + ), + ]; + + // Make sure that subtypes match when subtypes are to be compared and doesn't when they should + // not be compared. + reference_types.iter().for_each(|r| { + let r1 = r.0.into(); + let r2 = r.1.into(); + assert!(type_tree.is_subtype_of(&r2, &r1)); + }); } - /// Finds forward references from the specified node. The reference filter can optionally filter results - /// by a specific type and subtypes. - pub fn find_references( - &self, - node: &NodeId, - reference_filter: Option<(T, bool)>, - ) -> Option> - where - T: Into + Clone, - { - self.references.find_references(node, reference_filter) + /// This test is to ensure that adding a Variable with a value of Array to address space sets the + /// ValueRank and ArrayDimensions attributes correctly. + #[test] + fn array_as_variable() { + // 1 dimensional array with 100 element + let values = (0..100) + .map(|i| Variant::Int32(i)) + .collect::>(); + + // Get the variable node back from the address space, ensure that the ValueRank and ArrayDimensions are correct + let node_id = NodeId::new(2, 1); + let v = Variable::new(&node_id, "x", "x", (VariantTypeId::Int32, values)); + + let value_rank = v.value_rank(); + assert_eq!(value_rank, 1); + let array_dimensions = v.array_dimensions().unwrap(); + assert_eq!(array_dimensions, vec![100u32]); } - /// Finds inverse references, it those that point to the specified node. The reference filter can - /// optionally filter results by a specific type and subtypes. - pub fn find_inverse_references( - &self, - node: &NodeId, - reference_filter: Option<(T, bool)>, - ) -> Option> - where - T: Into + Clone, - { - self.references - .find_inverse_references(node, reference_filter) + /// This test is to ensure that adding a Variable with a value of Array to address space sets the + /// ValueRank and ArrayDimensions attributes correctly. 
+ #[test] + fn multi_dimension_array_as_variable() { + // 2 dimensional array with 10x10 elements + + let values = (0..100) + .map(|i| Variant::Int32(i)) + .collect::>(); + let mda = Array::new_multi(VariantTypeId::Int32, values, vec![10u32, 10u32]).unwrap(); + assert!(mda.is_valid()); + + // Get the variable node back from the address space, ensure that the ValueRank and ArrayDimensions are correct + let node_id = NodeId::new(2, 1); + let v = Variable::new(&node_id, "x", "x", mda); + + let value_rank = v.value_rank(); + assert_eq!(value_rank, 2); + let array_dimensions = v.array_dimensions().unwrap(); + assert_eq!(array_dimensions, vec![10u32, 10u32]); } - /// Finds references for optionally forwards, inverse or both and return the references. The usize - /// represents the index in the collection where the inverse references start (if applicable) - pub fn find_references_by_direction( - &self, - node_id: &NodeId, - browse_direction: BrowseDirection, - reference_filter: Option<(T, bool)>, - ) -> (Vec, usize) - where - T: Into + Clone, - { - self.references - .find_references_by_direction(node_id, browse_direction, reference_filter) + #[test] + fn browse_nodes() { + let address_space = make_sample_address_space(); + + // Test that a node can be found + let object_id = ObjectId::RootFolder.into(); + let result = address_space.find_node_by_browse_path( + &object_id, + None::<(NodeId, bool)>, + &TypeTree::new(), + BrowseDirection::Forward, + &["Objects".into(), "Sample".into(), "v1".into()], + ); + let node = result.unwrap(); + assert_eq!(node.as_node().browse_name(), &QualifiedName::from("v1")); + + // Test that a non existent node cannot be found + let result = address_space.find_node_by_browse_path( + &object_id, + None::<(NodeId, bool)>, + &TypeTree::new(), + BrowseDirection::Forward, + &["Objects".into(), "Sample".into(), "vxxx".into()], + ); + assert!(result.is_none()); } - /// Updates the last modified timestamp to now - fn update_last_modified(&mut self) { - 
self.last_modified = Utc::now(); + #[test] + fn object_builder() { + let mut address_space = make_sample_address_space(); + + let node_type_id = NodeId::new(1, "HelloType"); + let _ot = ObjectTypeBuilder::new(&node_type_id, "HelloType", "HelloType") + .subtype_of(ObjectTypeId::BaseObjectType) + .insert(&mut address_space); + + let node_id = NodeId::new(1, "Hello"); + let _o = ObjectBuilder::new(&node_id, "Foo", "Foo") + .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) + .organized_by(ObjectId::ObjectsFolder) + .has_type_definition(node_type_id.clone()) + .insert(&mut address_space); + + // Verify the variable is there + let _o = match address_space.find_node(&node_id).unwrap() { + NodeType::Object(o) => o, + _ => panic!(), + }; + + // Verify the reference to the objects folder is there + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); } - /// Sets the getter for a variable node - fn set_variable_getter(&mut self, variable_id: N, getter: F) - where - N: Into, - F: FnMut( - &NodeId, - TimestampsToReturn, - AttributeId, - NumericRange, - &QualifiedName, - f64, - ) -> Result, StatusCode> - + Send - + 'static, - { - if let Some(ref mut v) = self.find_variable_mut(variable_id) { - let getter = AttrFnGetter::new(getter); - v.set_value_getter(Arc::new(Mutex::new(getter))); + #[test] + fn object_type_builder() { + let mut address_space = make_sample_address_space(); + + let node_type_id = NodeId::new(1, "HelloType"); + let _ot = ObjectTypeBuilder::new(&node_type_id, "HelloType", "HelloType") + .subtype_of(ObjectTypeId::BaseObjectType) + .insert(&mut address_space); + + let _ot = match address_space.find_node(&node_type_id).unwrap() { + NodeType::ObjectType(ot) => ot, + _ => panic!(), + }; + + assert!(address_space.has_reference( + &ObjectTypeId::BaseObjectType.into(), + &node_type_id, + 
ReferenceTypeId::HasSubtype + )); + } + + #[test] + fn variable_builder() { + let result = std::panic::catch_unwind(|| { + // This should panic + let _v = VariableBuilder::new(&NodeId::null(), "", "").build(); + }); + assert!(result.is_err()); + + // This should build + let _v = VariableBuilder::new(&NodeId::new(1, 1), "", "") + .data_type(DataTypeId::Boolean) + .build(); + + // Check a variable with a bunch of fields set + let v = VariableBuilder::new(&NodeId::new(1, "Hello"), "BrowseName", "DisplayName") + .description("Desc") + .data_type(DataTypeId::UInt32) + .value_rank(10) + .array_dimensions(&[1, 2, 3]) + .historizing(true) + .value(Variant::from(999)) + .minimum_sampling_interval(123.0) + .build(); + + assert_eq!(v.node_id(), &NodeId::new(1, "Hello")); + assert_eq!(v.browse_name(), &QualifiedName::new(0, "BrowseName")); + assert_eq!(v.display_name(), &"DisplayName".into()); + assert_eq!(v.data_type(), DataTypeId::UInt32.into()); + assert_eq!(v.description().unwrap(), &"Desc".into()); + assert_eq!(v.value_rank(), 10); + assert_eq!(v.array_dimensions().unwrap(), vec![1, 2, 3]); + assert_eq!(v.historizing(), true); + assert_eq!( + v.value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0 + ) + .value + .unwrap(), + Variant::from(999) + ); + assert_eq!(v.minimum_sampling_interval().unwrap(), 123.0); + + // Add a variable to the address space + + let mut address_space = make_sample_address_space(); + let node_id = NodeId::new(1, "Hello"); + let _v = VariableBuilder::new(&node_id, "BrowseName", "DisplayName") + .description("Desc") + .value_rank(10) + .data_type(DataTypeId::UInt32) + .array_dimensions(&[1, 2, 3]) + .historizing(true) + .value(Variant::from(999)) + .minimum_sampling_interval(123.0) + .organized_by(ObjectId::ObjectsFolder) + .insert(&mut address_space); + + // Verify the variable is there + assert!(address_space.find_node(&node_id).is_some()); + // Verify the reference to the objects folder is there + 
assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + } + + #[test] + fn method_builder() { + let mut address_space = make_sample_address_space(); + + address_space.add_namespace("urn:test", 1); + let ns = 1; + + let object_id: NodeId = ObjectId::ObjectsFolder.into(); + + let fn_node_id = NodeId::new(ns, "HelloWorld"); + let out_args = NodeId::new(ns, "HelloWorldOut"); + + let inserted = MethodBuilder::new(&fn_node_id, "HelloWorld", "HelloWorld") + .component_of(object_id.clone()) + .output_args( + &mut address_space, + &out_args, + &[("Result", DataTypeId::String).into()], + ) + .insert(&mut address_space); + assert!(inserted); + + assert!(matches!( + address_space.find_node(&fn_node_id), + Some(NodeType::Method(_)) + )); + + let refs: Vec<_> = address_space + .find_references( + &fn_node_id, + Some((ReferenceTypeId::HasProperty, false)), + &TypeTree::new(), + BrowseDirection::Forward, + ) + .collect(); + assert_eq!(refs.len(), 1); + + let child = address_space + .find_node(&refs.get(0).unwrap().target_node) + .unwrap(); + if let NodeType::Variable(v) = child { + // verify OutputArguments + // verify OutputArguments / Argument value + assert_eq!(v.data_type(), DataTypeId::Argument.into()); + assert_eq!(v.display_name(), &LocalizedText::from("OutputArguments")); + let v = v + .value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0, + ) + .value + .unwrap(); + if let Variant::Array(array) = v { + let v = array.values; + assert_eq!(v.len(), 1); + let v = v.get(0).unwrap().clone(); + if let Variant::ExtensionObject(v) = v { + // deserialize the Argument here + let decoding_options = DecodingOptions::test(); + let argument = v.decode_inner::(&decoding_options).unwrap(); + assert_eq!(argument.name, UAString::from("Result")); + assert_eq!(argument.data_type, DataTypeId::String.into()); + assert_eq!(argument.value_rank, -1); + assert_eq!(argument.array_dimensions, 
None); + assert_eq!(argument.description, LocalizedText::null()); + } else { + panic!("Variant was expected to be extension object, was {:?}", v); + } + } else { + panic!("Variant was expected to be array, was {:?}", v); + } + } else { + panic!(); } } - /// Returns the references - pub fn references(&self) -> &References { - &self.references + #[test] + fn simple_delete_node() { + crate::console_logging::init(); + + // This is a super basic, debuggable delete test. There is a single Root node, and a + // child object. After deleting the child, only the Root should exist with no references at + // all to the child. + + // A blank address space, with nothing at all in it + let mut address_space = make_sample_address_space(); + + // Add a root node + let root_node = NodeId::root_folder_id(); + + let node = Object::new(&root_node, "Root", "", EventNotifier::empty()); + let _ = address_space.insert::(node, None); + + let node_id = NodeId::new(1, "Hello"); + let _o = ObjectBuilder::new(&node_id, "Foo", "Foo") + .organized_by(root_node.clone()) + .insert(&mut address_space); + + // Verify the object and refs are there + assert!(address_space.find_node(&node_id).is_some()); + assert!(address_space.has_reference(&root_node, &node_id, ReferenceTypeId::Organizes)); + + // Try one time deleting references, the other time not deleting them. 
+ address_space.delete(&node_id, true); + // Delete the node and the refs + assert!(address_space.find_node(&node_id).is_none()); + assert!(address_space.find_node(&root_node).is_some()); + assert!(!address_space.has_reference(&root_node, &node_id, ReferenceTypeId::Organizes)); + } + + #[test] + fn delete_node() { + crate::console_logging::init(); + + // Try creating and deleting a node, verifying that it's totally gone afterwards + (0..2).for_each(|i| { + let mut address_space = make_sample_address_space(); + + let node_type_id = NodeId::new(1, "HelloType"); + let _ot = ObjectTypeBuilder::new(&node_type_id, "HelloType", "HelloType") + .subtype_of(ObjectTypeId::BaseObjectType) + .insert(&mut address_space); + + let node_id = NodeId::new(1, "Hello"); + let _o = ObjectBuilder::new(&node_id, "Foo", "Foo") + .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) + .organized_by(ObjectId::ObjectsFolder) + .has_type_definition(node_type_id.clone()) + .insert(&mut address_space); + + // Verify the object and refs are there + assert!(address_space.find_node(&node_id).is_some()); + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(!address_space.has_reference( + &node_id, + &ObjectId::ObjectsFolder.into(), + ReferenceTypeId::Organizes + )); + assert!(address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); + + // Try one time deleting references, the other time not deleting them. 
+ let delete_target_references = i == 1; + address_space.delete(&node_id, delete_target_references); + if !delete_target_references { + // Deleted the node and outgoing refs, but not incoming refs + assert!(address_space.find_node(&node_id).is_none()); + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(!address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); + } else { + // Delete the node and the refs + assert!(address_space.find_node(&node_id).is_none()); + assert!(!address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(!address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); + } + }); } } diff --git a/lib/src/server/address_space/base.rs b/lib/src/server/address_space/base.rs index 22e94dfca..c79f7c36a 100644 --- a/lib/src/server/address_space/base.rs +++ b/lib/src/server/address_space/base.rs @@ -10,19 +10,19 @@ use super::node::{Node, NodeBase}; #[derive(Debug)] pub(crate) struct Base { /// The node id of this node - node_id: NodeId, + pub(super) node_id: NodeId, /// The node class of this node - node_class: NodeClass, + pub(super) node_class: NodeClass, /// The node's browse name which must be unique amongst its siblings - browse_name: QualifiedName, + pub(super) browse_name: QualifiedName, /// The human readable display name - display_name: LocalizedText, + pub(super) display_name: LocalizedText, /// The description of the node (optional) - description: Option, + pub(super) description: Option, /// Write mask bits (optional) - write_mask: Option, + pub(super) write_mask: Option, /// User write mask bits (optional) - user_write_mask: Option, + pub(super) user_write_mask: Option, } impl NodeBase for Base { @@ -30,24 +30,24 @@ impl NodeBase for Base { self.node_class } - fn node_id(&self) -> NodeId { - self.node_id.clone() + fn 
node_id(&self) -> &NodeId { + &self.node_id } - fn browse_name(&self) -> QualifiedName { - self.browse_name.clone() + fn browse_name(&self) -> &QualifiedName { + &self.browse_name } - fn display_name(&self) -> LocalizedText { - self.display_name.clone() + fn display_name(&self) -> &LocalizedText { + &self.display_name } fn set_display_name(&mut self, display_name: LocalizedText) { self.display_name = display_name; } - fn description(&self) -> Option { - self.description.clone() + fn description(&self) -> Option<&LocalizedText> { + self.description.as_ref() } fn set_description(&mut self, description: LocalizedText) { @@ -82,10 +82,13 @@ impl Node for Base { ) -> Option { match attribute_id { AttributeId::NodeClass => Some((self.node_class as i32).into()), - AttributeId::NodeId => Some(self.node_id().into()), - AttributeId::BrowseName => Some(self.browse_name().into()), - AttributeId::DisplayName => Some(self.display_name().into()), - AttributeId::Description => self.description().map(|description| description.into()), + AttributeId::NodeId => Some(self.node_id().clone().into()), + AttributeId::BrowseName => Some(self.browse_name().clone().into()), + AttributeId::DisplayName => Some(self.display_name().clone().into()), + AttributeId::Description => self + .description() + .cloned() + .map(|description| description.into()), AttributeId::WriteMask => self.write_mask.map(|v| v.into()), AttributeId::UserWriteMask => self.user_write_mask.map(|v| v.into()), _ => None, diff --git a/lib/src/server/address_space/data_type.rs b/lib/src/server/address_space/data_type.rs index 8e7cf45c2..9055627f3 100644 --- a/lib/src/server/address_space/data_type.rs +++ b/lib/src/server/address_space/data_type.rs @@ -4,17 +4,37 @@ //! Contains the implementation of `Method` and `MethodBuilder`. 
-use crate::types::service_types::DataTypeAttributes; +use crate::{ + types::service_types::DataTypeAttributes, + types::{ + AttributeId, AttributesMask, DataValue, NumericRange, StatusCode, TimestampsToReturn, + Variant, + }, +}; use super::{base::Base, node::Node, node::NodeBase}; node_builder_impl!(DataTypeBuilder, DataType); +node_builder_impl_subtype!(DataTypeBuilder); + +impl DataTypeBuilder { + pub fn is_abstract(mut self, is_abstract: bool) -> Self { + self.node.set_is_abstract(is_abstract); + self + } + + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } +} /// A `DataType` is a type of node within the `AddressSpace`. #[derive(Debug)] pub struct DataType { - base: Base, - is_abstract: bool, + pub(super) base: Base, + pub(super) is_abstract: bool, + // TODO: Handle DataTypeDefinition. Requires codegen extensions } impl Default for DataType { diff --git a/lib/src/server/address_space/generic.rs b/lib/src/server/address_space/generic.rs new file mode 100644 index 000000000..9b5a8a7e2 --- /dev/null +++ b/lib/src/server/address_space/generic.rs @@ -0,0 +1,221 @@ +use crate::types::{ + AddNodeAttributes, AttributeId, DataValue, DateTime, NodeClass, NodeId, QualifiedName, + StatusCode, +}; + +use super::{ + base::Base, DataType, EventNotifier, Method, NodeType, Object, ObjectType, ReferenceType, + Variable, VariableType, View, +}; + +const fn mask(attribute: AttributeId) -> u32 { + match attribute { + AttributeId::AccessLevel => 0, + AttributeId::ArrayDimensions => 1, + AttributeId::ContainsNoLoops => 3, + AttributeId::DataType => 4, + AttributeId::Description => 5, + AttributeId::DisplayName => 6, + AttributeId::EventNotifier => 7, + AttributeId::Executable => 8, + AttributeId::Historizing => 9, + AttributeId::InverseName => 10, + AttributeId::IsAbstract => 11, + AttributeId::MinimumSamplingInterval => 12, + AttributeId::Symmetric => 15, + AttributeId::UserAccessLevel => 16, + 
AttributeId::UserExecutable => 17, + AttributeId::UserWriteMask => 18, + AttributeId::ValueRank => 19, + AttributeId::WriteMask => 20, + AttributeId::Value => 21, + _ => 31, + } +} + +macro_rules! masked_or_default { + ($attr:expr, $attrs:expr, $field:ident) => { + if (1 << mask($attr)) & $attrs.specified_attributes != 0 { + $attrs.$field + } else { + Default::default() + } + }; +} + +macro_rules! masked_or_default_opt { + ($attr:expr, $attrs:expr, $field:ident) => { + if (1 << mask($attr)) & $attrs.specified_attributes != 0 { + Some($attrs.$field) + } else { + Default::default() + } + }; +} + +macro_rules! base { + ($attrs:expr, $node_id:expr, $node_class:expr, $browse_name:expr) => { + Base { + node_id: $node_id, + node_class: $node_class, + browse_name: $browse_name, + display_name: masked_or_default!(AttributeId::DisplayName, $attrs, display_name), + description: masked_or_default_opt!(AttributeId::Description, $attrs, description), + write_mask: masked_or_default_opt!(AttributeId::WriteMask, $attrs, write_mask), + user_write_mask: masked_or_default_opt!( + AttributeId::UserWriteMask, + $attrs, + user_write_mask + ), + } + }; +} + +pub fn new_node_from_attributes( + node_id: NodeId, + browse_name: QualifiedName, + node_class: NodeClass, + node_attributes: AddNodeAttributes, +) -> Result { + let now = DateTime::now(); + let r = match node_attributes { + AddNodeAttributes::Object(a) => NodeType::Object(Box::new(Object { + base: base!(a, node_id, node_class, browse_name), + event_notifier: if (1 << mask(AttributeId::EventNotifier)) & a.specified_attributes != 0 + { + EventNotifier::from_bits(a.event_notifier) + .ok_or_else(|| StatusCode::BadNodeAttributesInvalid)? 
+ } else { + EventNotifier::empty() + }, + })), + AddNodeAttributes::Variable(a) => NodeType::Variable(Box::new(Variable { + base: base!(a, node_id, node_class, browse_name), + data_type: masked_or_default!(AttributeId::DataType, a, data_type), + historizing: masked_or_default!(AttributeId::Historizing, a, historizing), + value_rank: masked_or_default!(AttributeId::ValueRank, a, value_rank), + value: if (1 << mask(AttributeId::Value)) & a.specified_attributes != 0 { + DataValue { + source_timestamp: Some(now), + server_timestamp: Some(now), + value: Some(a.value), + status: Some(StatusCode::Good), + ..Default::default() + } + } else { + DataValue::default() + }, + access_level: masked_or_default!(AttributeId::AccessLevel, a, access_level), + user_access_level: masked_or_default!( + AttributeId::UserAccessLevel, + a, + user_access_level + ), + array_dimensions: masked_or_default!(AttributeId::ArrayDimensions, a, array_dimensions), + minimum_sampling_interval: masked_or_default_opt!( + AttributeId::MinimumSamplingInterval, + a, + minimum_sampling_interval + ), + })), + AddNodeAttributes::Method(a) => NodeType::Method(Box::new(Method { + base: base!(a, node_id, node_class, browse_name), + executable: masked_or_default!(AttributeId::Executable, a, executable), + user_executable: masked_or_default!(AttributeId::UserExecutable, a, user_executable), + })), + AddNodeAttributes::ObjectType(a) => NodeType::ObjectType(Box::new(ObjectType { + base: base!(a, node_id, node_class, browse_name), + is_abstract: masked_or_default!(AttributeId::IsAbstract, a, is_abstract), + })), + AddNodeAttributes::VariableType(a) => NodeType::VariableType(Box::new(VariableType { + base: base!(a, node_id, node_class, browse_name), + data_type: masked_or_default!(AttributeId::DataType, a, data_type), + is_abstract: masked_or_default!(AttributeId::IsAbstract, a, is_abstract), + value_rank: masked_or_default!(AttributeId::ValueRank, a, value_rank), + value: if (1 << mask(AttributeId::Value)) & 
a.specified_attributes != 0 { + Some(DataValue { + source_timestamp: Some(now), + server_timestamp: Some(now), + value: Some(a.value), + status: Some(StatusCode::Good), + ..Default::default() + }) + } else { + None + }, + array_dimensions: masked_or_default!(AttributeId::ArrayDimensions, a, array_dimensions), + })), + AddNodeAttributes::ReferenceType(a) => NodeType::ReferenceType(Box::new(ReferenceType { + base: base!(a, node_id, node_class, browse_name), + symmetric: masked_or_default!(AttributeId::Symmetric, a, symmetric), + is_abstract: masked_or_default!(AttributeId::IsAbstract, a, is_abstract), + inverse_name: masked_or_default_opt!(AttributeId::InverseName, a, inverse_name), + })), + AddNodeAttributes::DataType(a) => NodeType::DataType(Box::new(DataType { + base: base!(a, node_id, node_class, browse_name), + is_abstract: masked_or_default!(AttributeId::IsAbstract, a, is_abstract), + })), + AddNodeAttributes::View(a) => NodeType::View(Box::new(View { + base: base!(a, node_id, node_class, browse_name), + event_notifier: if (1 << mask(AttributeId::EventNotifier)) & a.specified_attributes != 0 + { + EventNotifier::from_bits(a.event_notifier) + .ok_or_else(|| StatusCode::BadNodeAttributesInvalid)? 
+ } else { + EventNotifier::empty() + }, + contains_no_loops: masked_or_default!( + AttributeId::ContainsNoLoops, + a, + contains_no_loops + ), + })), + AddNodeAttributes::Generic(a) => { + let base = base!(a, node_id, node_class, browse_name); + let mut node = match node_class { + NodeClass::Unspecified => return Err(StatusCode::BadNodeClassInvalid), + NodeClass::Object => NodeType::Object(Box::new(Object { + base, + ..Default::default() + })), + NodeClass::Variable => NodeType::Variable(Box::new(Variable { + base, + ..Default::default() + })), + NodeClass::Method => NodeType::Method(Box::new(Method { + base, + ..Default::default() + })), + NodeClass::ObjectType => NodeType::ObjectType(Box::new(ObjectType { + base, + ..Default::default() + })), + NodeClass::VariableType => NodeType::VariableType(Box::new(VariableType { + base, + ..Default::default() + })), + NodeClass::ReferenceType => NodeType::ReferenceType(Box::new(ReferenceType { + base, + ..Default::default() + })), + NodeClass::DataType => NodeType::DataType(Box::new(DataType { + base, + ..Default::default() + })), + NodeClass::View => NodeType::View(Box::new(View { + base, + ..Default::default() + })), + }; + let node_mut = node.as_mut_node(); + for attr in a.attribute_values.into_iter().flatten() { + let id = AttributeId::from_u32(attr.attribute_id) + .map_err(|_| StatusCode::BadAttributeIdInvalid)?; + node_mut.set_attribute(id, attr.value)?; + } + node + } + AddNodeAttributes::None => return Err(StatusCode::BadNodeAttributesInvalid), + }; + Ok(r) +} diff --git a/lib/src/server/address_space/method.rs b/lib/src/server/address_space/method.rs index 410377533..9ad9c4955 100644 --- a/lib/src/server/address_space/method.rs +++ b/lib/src/server/address_space/method.rs @@ -4,20 +4,20 @@ //! Contains the implementation of `Method` and `MethodBuilder`. 
-use std::sync::Arc; - -use crate::sync::*; -use crate::types::service_types::{Argument, MethodAttributes}; +use crate::{ + types::service_types::{Argument, MethodAttributes}, + types::{ + AttributeId, AttributesMask, DataTypeId, DataValue, ExtensionObject, NumericRange, + ObjectId, StatusCode, TimestampsToReturn, VariableTypeId, Variant, VariantTypeId, + }, +}; use super::{ - address_space::MethodCallback, base::Base, node::{Node, NodeBase}, variable::VariableBuilder, }; -use crate::server::session::SessionManager; - node_builder_impl!(MethodBuilder, Method); node_builder_impl_component_of!(MethodBuilder); node_builder_impl_generates_event!(MethodBuilder); @@ -25,20 +25,40 @@ node_builder_impl_generates_event!(MethodBuilder); impl MethodBuilder { /// Specify output arguments from the method. This will create an OutputArguments /// variable child of the method which describes the out parameters. - pub fn output_args(self, address_space: &mut AddressSpace, arguments: &[Argument]) -> Self { - self.insert_args("OutputArguments", address_space, arguments); + pub fn output_args( + self, + address_space: &mut AddressSpace, + node_id: &NodeId, + arguments: &[Argument], + ) -> Self { + self.insert_args(node_id, "OutputArguments", address_space, arguments); self } /// Specify input arguments to the method. This will create an InputArguments /// variable child of the method which describes the in parameters. 
- pub fn input_args(self, address_space: &mut AddressSpace, arguments: &[Argument]) -> Self { - self.insert_args("InputArguments", address_space, arguments); + pub fn input_args( + self, + address_space: &mut AddressSpace, + node_id: &NodeId, + arguments: &[Argument], + ) -> Self { + self.insert_args(node_id, "InputArguments", address_space, arguments); + self + } + + pub fn executable(mut self, executable: bool) -> Self { + self.node.set_executable(executable); + self + } + + pub fn user_executable(mut self, executable: bool) -> Self { + self.node.set_user_executable(executable); self } - pub fn callback(mut self, callback: MethodCallback) -> Self { - self.node.set_callback(callback); + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); self } @@ -57,14 +77,14 @@ impl MethodBuilder { fn insert_args( &self, + node_id: &NodeId, args_name: &str, address_space: &mut AddressSpace, arguments: &[Argument], ) { let fn_node_id = self.node.node_id(); - let args_id = NodeId::next_numeric(fn_node_id.namespace); let args_value = Self::args_to_variant(arguments); - VariableBuilder::new(&args_id, args_name, args_name) + VariableBuilder::new(node_id, args_name, args_name) .property_of(fn_node_id) .has_type_definition(VariableTypeId::PropertyType) .data_type(DataTypeId::Argument) @@ -79,11 +99,9 @@ impl MethodBuilder { #[derive(Derivative)] #[derivative(Debug)] pub struct Method { - base: Base, - executable: bool, - user_executable: bool, - #[derivative(Debug = "ignore")] - callback: Option, + pub(super) base: Base, + pub(super) executable: bool, + pub(super) user_executable: bool, } impl Default for Method { @@ -92,7 +110,6 @@ impl Default for Method { base: Base::new(NodeClass::Method, &NodeId::null(), "", ""), executable: false, user_executable: false, - callback: None, } } } @@ -164,7 +181,6 @@ impl Method { base: Base::new(NodeClass::Method, node_id, browse_name, display_name), executable, user_executable, - callback: None, } 
} @@ -205,7 +221,7 @@ impl Method { } pub fn is_valid(&self) -> bool { - self.has_callback() && self.base.is_valid() + self.base.is_valid() } pub fn executable(&self) -> bool { @@ -226,30 +242,4 @@ impl Method { pub fn set_user_executable(&mut self, user_executable: bool) { self.user_executable = user_executable; } - - pub fn set_callback(&mut self, callback: MethodCallback) { - self.callback = Some(callback); - } - - pub fn has_callback(&self) -> bool { - self.callback.is_some() - } - - pub fn call( - &mut self, - session_id: &NodeId, - session_manager: Arc>, - request: &CallMethodRequest, - ) -> Result { - if let Some(ref mut callback) = self.callback { - // Call the handler - callback.call(session_id, session_manager, request) - } else { - error!( - "Method call to {} has no handler, treating as invalid", - self.node_id() - ); - Err(StatusCode::BadMethodInvalid) - } - } } diff --git a/lib/src/server/address_space/method_impls.rs b/lib/src/server/address_space/method_impls.rs deleted file mode 100644 index 1acc6aa51..000000000 --- a/lib/src/server/address_space/method_impls.rs +++ /dev/null @@ -1,183 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::sync::*; -use crate::types::{ - service_types::{CallMethodRequest, CallMethodResult}, - status_code::StatusCode, - *, -}; - -use crate::server::{callbacks::Method, session::SessionManager}; - -/// Count the number of provided input arguments, comparing them to the expected number. 
-fn ensure_input_argument_count( - request: &CallMethodRequest, - expected: usize, -) -> Result<(), StatusCode> { - if let Some(ref input_arguments) = request.input_arguments { - let actual = input_arguments.len(); - if actual == expected { - Ok(()) - } else if actual < expected { - debug!("Method call fails BadArgumentsMissing"); - Err(StatusCode::BadArgumentsMissing) - } else { - debug!("Method call fails BadTooManyArguments"); - Err(StatusCode::BadTooManyArguments) - } - } else if expected == 0 { - Ok(()) - } else { - debug!("Method call fails BadArgumentsMissing"); - Err(StatusCode::BadArgumentsMissing) - } -} - -/// Gets the input argument value, expecting it to the specified variant type. If it fails, -/// it returns an error -macro_rules! get_input_argument { - ( $request:expr, $index: expr, $variant_type: ident ) => {{ - let input_arguments = $request.input_arguments.as_ref().unwrap(); - let arg = input_arguments.get($index).unwrap(); - if let Variant::$variant_type(value) = arg { - Ok(value) - } else { - // Argument is not the expected type - Err(StatusCode::BadInvalidArgument) - } - }}; -} - -/// Search all sessions in the session map except the specified one for a matching subscription id -fn subscription_exists_on_other_session( - this_session_id: &NodeId, - session_manager: Arc>, - subscription_id: u32, -) -> bool { - // Check if the subscription exists on another session - let session_manager = trace_read_lock!(session_manager); - session_manager.sessions.iter().any(|(_, s)| { - let s = trace_read_lock!(s); - s.session_id() != this_session_id && s.subscriptions().contains(subscription_id) - }) -} - -/// This is the handler for Server.ResendData method call. 
-pub struct ServerResendDataMethod; - -impl Method for ServerResendDataMethod { - fn call( - &mut self, - session_id: &NodeId, - session_manager: Arc>, - request: &CallMethodRequest, - ) -> Result { - debug!("Method handler for ResendData"); - - // OPC UA part 5 - ResendData([in] UInt32 subscriptionId); - // - // subscriptionId - Identifier of the subscription to refresh - // - // Return codes - // - // BadSubscriptionIdInvalid - // BadUserAccessDenied - - ensure_input_argument_count(request, 1)?; - - let subscription_id = get_input_argument!(request, 0, UInt32)?; - - { - let session_manager = trace_read_lock!(session_manager); - if let Some(session) = session_manager.find_session_by_id(session_id) { - let mut session = trace_write_lock!(session); - if let Some(subscription) = session.subscriptions_mut().get_mut(*subscription_id) { - subscription.set_resend_data(); - return Ok(CallMethodResult { - status_code: StatusCode::Good, - input_argument_results: Some(vec![StatusCode::Good]), - input_argument_diagnostic_infos: None, - output_arguments: None, - }); - }; - } else { - return Err(StatusCode::BadSessionIdInvalid); - } - } - - if subscription_exists_on_other_session(session_id, session_manager, *subscription_id) { - Err(StatusCode::BadUserAccessDenied) - } else { - Err(StatusCode::BadSubscriptionIdInvalid) - } - } -} - -/// This is the handler for the Server.GetMonitoredItems method call. 
-pub struct ServerGetMonitoredItemsMethod; - -impl Method for ServerGetMonitoredItemsMethod { - fn call( - &mut self, - session_id: &NodeId, - session_manager: Arc>, - request: &CallMethodRequest, - ) -> Result { - debug!("Method handler for GetMonitoredItems"); - - // OPC UA part 5 - GetMonitoredItems([in] UInt32 subscriptionId, [out] UInt32[] serverHandles, [out] UInt32[] clientHandles); - // - // subscriptionId - Identifier of the subscription - // serverHandles - Array of serverHandles for all MonitoredItems of the Subscription identified by subscriptionId - // clientHandles - Array of clientHandles for all MonitoredItems of the Subscription identified by subscriptionId - // - // Return codes - // - // BadSubscriptionIdInvalid - // BadUserAccessDenied - - ensure_input_argument_count(request, 1)?; - - let subscription_id = get_input_argument!(request, 0, UInt32)?; - - // Check for subscription on the session supplied - { - let session_manager = trace_read_lock!(session_manager); - if let Some(session) = session_manager.find_session_by_id(session_id) { - let session = trace_read_lock!(session); - if let Some(subscription) = - session.subscriptions().subscriptions().get(subscription_id) - { - // Response - // serverHandles: Vec - // clientHandles: Vec - let (server_handles, client_handles) = subscription.get_handles(); - - let server_handles = Variant::from(server_handles); - let client_handles = Variant::from(client_handles); - let output_arguments = vec![server_handles, client_handles]; - - return Ok(CallMethodResult { - status_code: StatusCode::Good, - input_argument_results: Some(vec![StatusCode::Good]), - input_argument_diagnostic_infos: None, - output_arguments: Some(output_arguments), - }); - }; - } else { - return Err(StatusCode::BadSessionIdInvalid); - } - } - if subscription_exists_on_other_session(session_id, session_manager, *subscription_id) { - debug!("Method handler for GetMonitoredItems returns BadUserAccessDenied"); - 
Err(StatusCode::BadUserAccessDenied) - } else { - debug!("Method handler for GetMonitoredItems returns BadSubscriptionIdInvalid"); - Err(StatusCode::BadSubscriptionIdInvalid) - } - } -} diff --git a/lib/src/server/address_space/mod.rs b/lib/src/server/address_space/mod.rs index e42943a2b..8d8c1dbaa 100644 --- a/lib/src/server/address_space/mod.rs +++ b/lib/src/server/address_space/mod.rs @@ -1,134 +1,34 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides functionality to create an address space, find nodes, add nodes, change attributes -//! and values on nodes. - -use std::{result::Result, sync::Arc}; - -use crate::sync::*; -use crate::types::status_code::StatusCode; -use crate::types::{ - AttributeId, DataValue, NodeId, NumericRange, QualifiedName, TimestampsToReturn, -}; - -use super::callbacks::{AttributeGetter, AttributeSetter}; - -pub use self::address_space::AddressSpace; - -/// An implementation of attribute getter that can be easily constructed from a mutable function -pub struct AttrFnGetter -where - F: FnMut( - &NodeId, - TimestampsToReturn, - AttributeId, - NumericRange, - &QualifiedName, - f64, - ) -> Result, StatusCode> - + Send, -{ - getter: F, -} - -impl AttributeGetter for AttrFnGetter -where - F: FnMut( - &NodeId, - TimestampsToReturn, - AttributeId, - NumericRange, - &QualifiedName, - f64, - ) -> Result, StatusCode> - + Send, -{ - fn get( - &mut self, - node_id: &NodeId, - timestamps_to_return: TimestampsToReturn, - attribute_id: AttributeId, - index_range: NumericRange, - data_encoding: &QualifiedName, - max_age: f64, - ) -> Result, StatusCode> { - (self.getter)( - node_id, - timestamps_to_return, - attribute_id, - index_range, - data_encoding, - max_age, - ) - } -} - -impl AttrFnGetter -where - F: FnMut( - &NodeId, - TimestampsToReturn, - AttributeId, - NumericRange, - &QualifiedName, - f64, - ) -> Result, StatusCode> - + Send, -{ - pub fn new(getter: F) -> AttrFnGetter { - 
AttrFnGetter { getter } - } - - pub fn new_boxed(getter: F) -> Arc>> { - Arc::new(Mutex::new(Self::new(getter))) - } -} - -/// An implementation of attribute setter that can be easily constructed using a mutable function -pub struct AttrFnSetter -where - F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send, -{ - setter: F, -} - -impl AttributeSetter for AttrFnSetter -where - F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send, -{ - fn set( - &mut self, - node_id: &NodeId, - attribute_id: AttributeId, - index_range: NumericRange, - data_value: DataValue, - ) -> Result<(), StatusCode> { - (self.setter)(node_id, attribute_id, index_range, data_value) - } -} - -impl AttrFnSetter -where - F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send, -{ - pub fn new(setter: F) -> AttrFnSetter { - AttrFnSetter { setter } - } - - pub fn new_boxed(setter: F) -> Arc>> { - Arc::new(Mutex::new(Self::new(setter))) - } +mod address_space; +mod generic; +mod utils; + +pub use address_space::{AddressSpace, Reference, ReferenceDirection, ReferenceRef}; +pub use data_type::{DataType, DataTypeBuilder}; +pub use generic::new_node_from_attributes; +pub use method::{Method, MethodBuilder}; +pub use node::{HasNodeId, Node, NodeBase, NodeType}; +pub use object::{Object, ObjectBuilder}; +pub use object_type::{ObjectType, ObjectTypeBuilder}; +pub use reference_type::{ReferenceType, ReferenceTypeBuilder}; +pub use utils::*; +pub use variable::{Variable, VariableBuilder}; +pub use variable_type::{VariableType, VariableTypeBuilder}; +pub use view::{View, ViewBuilder}; + +// TODO: Remove, this is for compat with old generated code. +pub(crate) mod types { + pub use super::{ + AddressSpace, DataType, Method, NodeBase, Object, ObjectType, ReferenceDirection, + ReferenceType, Variable, VariableType, + }; } // A macro for creating builders. 
Builders can be used for more conveniently creating objects, // variables etc. macro_rules! node_builder_impl { ( $node_builder_ty:ident, $node_ty:ident ) => { - use $crate::server::address_space::{ - address_space::AddressSpace, references::ReferenceDirection, - }; + use $crate::server::address_space::{address_space::AddressSpace, ReferenceDirection}; + use $crate::types::{LocalizedText, NodeId, QualifiedName, ReferenceTypeId}; /// A builder for constructing a node of same name. This can be used as an easy way /// to create a node and the references it has to another node in a simple fashion. @@ -154,7 +54,7 @@ macro_rules! node_builder_impl { .display_name(display_name) } - pub fn get_node_id(&self) -> NodeId { + pub fn get_node_id(&self) -> &NodeId { self.node.node_id() } @@ -379,10 +279,8 @@ macro_rules! node_builder_impl_property_of { /// node has a base: Base macro_rules! node_base_impl { ( $node_struct:ident ) => { - use crate::{ - server::address_space::node::NodeType, - types::{status_code::StatusCode, *}, - }; + use crate::server::address_space::NodeType; + use crate::types::{NodeClass, WriteMask}; impl Into for $node_struct { fn into(self) -> NodeType { @@ -390,20 +288,20 @@ macro_rules! node_base_impl { } } - impl NodeBase for $node_struct { + impl crate::server::address_space::NodeBase for $node_struct { fn node_class(&self) -> NodeClass { self.base.node_class() } - fn node_id(&self) -> NodeId { + fn node_id(&self) -> &NodeId { self.base.node_id() } - fn browse_name(&self) -> QualifiedName { + fn browse_name(&self) -> &QualifiedName { self.base.browse_name() } - fn display_name(&self) -> LocalizedText { + fn display_name(&self) -> &LocalizedText { self.base.display_name() } @@ -411,7 +309,7 @@ macro_rules! node_base_impl { self.base.set_display_name(display_name); } - fn description(&self) -> Option { + fn description(&self) -> Option<&LocalizedText> { self.base.description() } @@ -438,25 +336,19 @@ macro_rules! 
node_base_impl { }; } -pub mod address_space; -pub mod base; -pub mod data_type; -pub mod method; -pub mod node; -pub mod object; -pub mod object_type; -pub mod reference_type; -pub mod references; -pub mod relative_path; -pub mod variable; -pub mod variable_type; -pub mod view; - -#[rustfmt::skip] -#[cfg(feature = "generated-address-space")] +mod base; +mod data_type; mod generated; -#[cfg(feature = "generated-address-space")] -mod method_impls; +mod method; +mod node; +mod object; +mod object_type; +mod reference_type; +mod variable; +mod variable_type; +mod view; + +pub use generated::populate_address_space; bitflags! { pub struct AccessLevel: u8 { @@ -491,18 +383,3 @@ bitflags! { const HISTORY_WRITE = 8; } } - -pub mod types { - pub use super::address_space::AddressSpace; - pub use super::data_type::{DataType, DataTypeBuilder}; - pub use super::method::{Method, MethodBuilder}; - pub use super::node::{NodeBase, NodeType}; - pub use super::object::{Object, ObjectBuilder}; - pub use super::object_type::{ObjectType, ObjectTypeBuilder}; - pub use super::reference_type::{ReferenceType, ReferenceTypeBuilder}; - pub use super::references::ReferenceDirection; - pub use super::variable::{Variable, VariableBuilder}; - pub use super::variable_type::{VariableType, VariableTypeBuilder}; - pub use super::view::{View, ViewBuilder}; - pub use super::{AttrFnGetter, AttrFnSetter}; -} diff --git a/lib/src/server/address_space/node.rs b/lib/src/server/address_space/node.rs index 4206436c6..4e3aa623f 100644 --- a/lib/src/server/address_space/node.rs +++ b/lib/src/server/address_space/node.rs @@ -7,9 +7,7 @@ use crate::types::{ NodeId, NumericRange, QualifiedName, TimestampsToReturn, Variant, WriteMask, }; -use super::types::{ - DataType, Method, Object, ObjectType, ReferenceType, Variable, VariableType, View, -}; +use super::{DataType, Method, Object, ObjectType, ReferenceType, Variable, VariableType, View}; /// A `NodeType` is an enumeration holding every kind of node which can be 
hosted within the `AddressSpace`. #[derive(Debug)] @@ -25,17 +23,17 @@ pub enum NodeType { } pub trait HasNodeId { - fn node_id(&self) -> NodeId; + fn node_id(&self) -> &NodeId; } impl HasNodeId for NodeType { - fn node_id(&self) -> NodeId { + fn node_id(&self) -> &NodeId { self.as_node().node_id() } } impl NodeType { - pub fn as_node(&self) -> &dyn Node { + pub fn as_node<'a>(&'a self) -> &'a (dyn Node + 'a) { match self { NodeType::Object(value) => value.as_ref(), NodeType::ObjectType(value) => value.as_ref(), @@ -83,18 +81,18 @@ pub trait NodeBase { fn node_class(&self) -> NodeClass; /// Returns the node's `NodeId` - fn node_id(&self) -> NodeId; + fn node_id(&self) -> &NodeId; /// Returns the node's browse name - fn browse_name(&self) -> QualifiedName; + fn browse_name(&self) -> &QualifiedName; /// Returns the node's display name - fn display_name(&self) -> LocalizedText; + fn display_name(&self) -> &LocalizedText; /// Sets the node's display name fn set_display_name(&mut self, display_name: LocalizedText); - fn description(&self) -> Option; + fn description(&self) -> Option<&LocalizedText>; fn set_description(&mut self, description: LocalizedText); diff --git a/lib/src/server/address_space/object.rs b/lib/src/server/address_space/object.rs index aaf758d02..e58eeae6b 100644 --- a/lib/src/server/address_space/object.rs +++ b/lib/src/server/address_space/object.rs @@ -4,7 +4,13 @@ //! Contains the implementation of `Object` and `ObjectBuilder`. 
-use crate::types::service_types::ObjectAttributes; +use crate::{ + types::service_types::ObjectAttributes, + types::{ + AttributeId, AttributesMask, DataValue, NumericRange, ObjectTypeId, StatusCode, + TimestampsToReturn, Variant, + }, +}; use super::{base::Base, node::Node, node::NodeBase, EventNotifier}; @@ -22,6 +28,11 @@ impl ObjectBuilder { self } + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } + pub fn has_type_definition(self, type_id: T) -> Self where T: Into, @@ -48,8 +59,8 @@ impl ObjectBuilder { /// An `Object` is a type of node within the `AddressSpace`. #[derive(Debug)] pub struct Object { - base: Base, - event_notifier: EventNotifier, + pub(super) base: Base, + pub(super) event_notifier: EventNotifier, } impl Default for Object { diff --git a/lib/src/server/address_space/object_type.rs b/lib/src/server/address_space/object_type.rs index 24006bbbd..3c4f28d6c 100644 --- a/lib/src/server/address_space/object_type.rs +++ b/lib/src/server/address_space/object_type.rs @@ -4,7 +4,13 @@ //! Contains the implementation of `ObjectType` and `ObjectTypeBuilder`. -use crate::types::service_types::ObjectTypeAttributes; +use crate::{ + types::service_types::ObjectTypeAttributes, + types::{ + AttributeId, AttributesMask, DataValue, NumericRange, StatusCode, TimestampsToReturn, + Variant, + }, +}; use super::{base::Base, node::Node, node::NodeBase}; @@ -20,13 +26,18 @@ impl ObjectTypeBuilder { self.node.set_is_abstract(is_abstract); self } + + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } } /// An `ObjectType` is a type of node within the `AddressSpace`. 
#[derive(Debug)] pub struct ObjectType { - base: Base, - is_abstract: bool, + pub(super) base: Base, + pub(super) is_abstract: bool, } impl Default for ObjectType { diff --git a/lib/src/server/address_space/reference_type.rs b/lib/src/server/address_space/reference_type.rs index a8ba55ee4..bd0b8ce27 100644 --- a/lib/src/server/address_space/reference_type.rs +++ b/lib/src/server/address_space/reference_type.rs @@ -4,26 +4,54 @@ //! Contains the implementation of `ReferenceType` and `ReferenceTypeBuilder`. -use crate::types::service_types::ReferenceTypeAttributes; +use crate::{ + types::service_types::ReferenceTypeAttributes, + types::{ + AttributeId, AttributesMask, DataValue, NumericRange, StatusCode, TimestampsToReturn, + Variant, + }, +}; use super::{base::Base, node::Node, node::NodeBase}; node_builder_impl!(ReferenceTypeBuilder, ReferenceType); node_builder_impl_subtype!(ReferenceTypeBuilder); +impl ReferenceTypeBuilder { + pub fn is_abstract(mut self, is_abstract: bool) -> Self { + self.node.set_is_abstract(is_abstract); + self + } + + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } + + pub fn symmetric(mut self, symmetric: bool) -> Self { + self.node.set_symmetric(symmetric); + self + } + + pub fn inverse_name(mut self, inverse_name: impl Into) -> Self { + self.node.set_inverse_name(inverse_name.into()); + self + } +} + /// A `ReferenceType` is a type of node within the `AddressSpace`. 
#[derive(Debug)] pub struct ReferenceType { - base: Base, - symmetric: bool, - is_abstract: bool, - inverse_name: Option, + pub(super) base: Base, + pub(super) symmetric: bool, + pub(super) is_abstract: bool, + pub(super) inverse_name: Option, } impl Default for ReferenceType { fn default() -> Self { Self { - base: Base::new(NodeClass::VariableType, &NodeId::null(), "", ""), + base: Base::new(NodeClass::ReferenceType, &NodeId::null(), "", ""), symmetric: false, is_abstract: false, inverse_name: None, diff --git a/lib/src/server/address_space/references.rs b/lib/src/server/address_space/references.rs deleted file mode 100644 index ce9466c7a..000000000 --- a/lib/src/server/address_space/references.rs +++ /dev/null @@ -1,502 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::collections::{HashMap, HashSet}; - -use crate::types::*; - -/// The `NodeId` is the target node. The reference is held in a list by the source node. -/// The target node does not need to exist. -#[derive(PartialEq, Eq, Clone, Debug, Hash)] -pub struct Reference { - pub reference_type: NodeId, - pub target_node: NodeId, -} - -impl Reference { - pub fn new(reference_type: T, target_node: NodeId) -> Reference - where - T: Into, - { - Reference { - reference_type: reference_type.into(), - target_node, - } - } -} - -#[derive(Debug, Copy, Clone)] -pub enum ReferenceDirection { - Forward, - Inverse, -} - -pub struct References { - /// The references map contains all forward references, i.e. the key is the node that has - /// a reference to one or more other nodes. - references_map: HashMap>, - /// The referenced by map allows a reverse lookup, to find what nodes this node is referenced - /// by. It is not the same as an inverse reference. A node may be referenced one or more - /// times by the other node. 
- referenced_by_map: HashMap>, -} - -impl Default for References { - fn default() -> Self { - Self { - references_map: HashMap::with_capacity(2000), - referenced_by_map: HashMap::with_capacity(2000), - } - } -} - -impl References { - /// Inserts a single reference into the map. - pub fn insert( - &mut self, - source_node: &NodeId, - references: &[(&NodeId, &T, ReferenceDirection)], - ) where - T: Into + Clone, - { - references.iter().for_each(|r| { - // An inverse reference will flip the nodes around - match r.2 { - ReferenceDirection::Forward => self.insert_reference(source_node, r.0, r.1), - ReferenceDirection::Inverse => self.insert_reference(r.0, source_node, r.1), - }; - }); - } - - /// For testing purposes, tests if the node exists anywhere in either direction. This test - /// just scans everything, looking for any mention of the node. Useful for tests which delete - /// nodes and expect them to be completely gone. - #[cfg(test)] - pub fn reference_to_node_exists(&self, node_id: &NodeId) -> bool { - if self.referenced_by_map.contains_key(node_id) { - debug!("Node {} is a key in references_to_map", node_id); - true - } else if self.references_map.contains_key(node_id) { - debug!("Node {} is a key in references_from_map", node_id); - true - } else if self - .references_map - .iter() - .find(|(k, v)| { - if let Some(r) = v.iter().find(|r| r.target_node == *node_id) { - debug!( - "Node {} is a value in references_from_map[{}, reference = {:?}", - node_id, k, r - ); - true - } else { - false - } - }) - .is_some() - { - true - } else if self - .referenced_by_map - .iter() - .find(|(k, v)| { - if v.contains(node_id) { - debug!( - "Node {} is a value in referenced_by_map, key {}", - node_id, k - ); - true - } else { - false - } - }) - .is_some() - { - true - } else { - false - } - } - - pub fn insert_reference( - &mut self, - source_node: &NodeId, - target_node: &NodeId, - reference_type: &T, - ) where - T: Into + Clone, - { - if source_node == target_node { - 
panic!( - "Node id from == node id to {}, self reference is not allowed", - source_node - ); - } - - let reference_type: NodeId = reference_type.clone().into(); - let reference = Reference::new(reference_type, target_node.clone()); - - if let Some(ref mut references) = self.references_map.get_mut(source_node) { - // Duplicates are possible from the machine generated code, so skip dupes - if !references.contains(&reference) { - references.push(reference); - } - } else { - // Some nodes will have more than one reference, so save some reallocs by reserving - // space for some more. - let mut references = Vec::with_capacity(8); - references.push(reference); - self.references_map.insert(source_node.clone(), references); - } - - // Add a reverse lookup reference - if let Some(ref mut lookup_set) = self.referenced_by_map.get_mut(target_node) { - lookup_set.insert(source_node.clone()); - } else { - let mut lookup_set = HashSet::new(); - lookup_set.insert(source_node.clone()); - self.referenced_by_map - .insert(target_node.clone(), lookup_set); - } - } - - /// Inserts references into the map. 
- pub fn insert_references(&mut self, references: &[(&NodeId, &NodeId, &T)]) - where - T: Into + Clone, - { - references.iter().for_each(|r| { - self.insert_reference(r.0, r.1, r.2); - }); - } - - fn remove_node_from_referenced_nodes( - &mut self, - nodes_to_check: HashSet, - node_to_remove: &NodeId, - ) { - nodes_to_check.into_iter().for_each(|node_to_check| { - // Removes any references that refer from the node to check back to the node to remove - let remove_entry = - if let Some(ref mut references) = self.references_map.get_mut(&node_to_check) { - references.retain(|r| r.target_node != *node_to_remove); - references.is_empty() - } else { - false - }; - if remove_entry { - self.references_map.remove(&node_to_check); - } - // Remove lookup that refer from the node to check back to the node to remove - let remove_lookup_map = - if let Some(ref mut lookup_map) = self.referenced_by_map.get_mut(&node_to_check) { - lookup_map.remove(node_to_remove); - lookup_map.is_empty() - } else { - false - }; - if remove_lookup_map { - self.referenced_by_map.remove(&node_to_check); - } - }); - } - - /// Deletes a matching references between one node and the target node of the specified - /// reference type. The function returns true if the reference was found and deleted. 
- pub fn delete_reference( - &mut self, - source_node: &NodeId, - target_node: &NodeId, - reference_type: T, - ) -> bool - where - T: Into, - { - let reference_type = reference_type.into(); - - let mut deleted = false; - let mut remove_entry = false; - // Remove the source node reference - if let Some(references) = self.references_map.get_mut(source_node) { - // Make a set of all the nodes that this node references - let other_nodes_before = references - .iter() - .map(|r| r.target_node.clone()) - .collect::>(); - // Delete a reference - references.retain(|r| { - if r.reference_type == reference_type && r.target_node == *target_node { - deleted = true; - false - } else { - true - } - }); - if references.is_empty() { - remove_entry = true; - } - - // Make a set of all nodes that this node references (after removal) - let other_nodes_after = references - .iter() - .map(|r| r.target_node.clone()) - .collect::>(); - - // If nodes are no longer referenced, then the ones that were removed must also have their - // references changed. - let difference = other_nodes_before - .difference(&other_nodes_after) - .cloned() - .collect::>(); - if !difference.is_empty() { - self.remove_node_from_referenced_nodes(difference, source_node); - } - } - if remove_entry { - self.references_map.remove(source_node); - } - - deleted - } - - /// Deletes all references to the node. 
- pub fn delete_node_references(&mut self, source_node: &NodeId) -> bool { - let deleted_references = if let Some(references) = self.references_map.remove(source_node) { - // Deleted every reference from the node, and clean up the reverse lookup map - let nodes_referenced = references - .iter() - .map(|r| r.target_node.clone()) - .collect::>(); - self.remove_node_from_referenced_nodes(nodes_referenced, source_node); - true - } else { - false - }; - - let deleted_lookups = if let Some(lookup_map) = self.referenced_by_map.remove(source_node) { - self.remove_node_from_referenced_nodes(lookup_map, source_node); - true - } else { - false - }; - - deleted_references || deleted_lookups - } - - /// Test if a reference relationship exists between one node and another node - pub fn has_reference( - &self, - source_node: &NodeId, - target_node: &NodeId, - reference_type: T, - ) -> bool - where - T: Into, - { - if let Some(references) = self.references_map.get(source_node) { - let reference = Reference::new(reference_type.into(), target_node.clone()); - references.contains(&reference) - } else { - false - } - } - - /// Finds forward references from the node - pub fn find_references( - &self, - source_node: &NodeId, - reference_filter: Option<(T, bool)>, - ) -> Option> - where - T: Into + Clone, - { - if let Some(node_references) = self.references_map.get(source_node) { - let result = self.filter_references_by_type(node_references, &reference_filter); - if result.is_empty() { - None - } else { - Some(result) - } - } else { - None - } - } - - /// Returns inverse references for the target node, i.e if there are references where - /// `Reference.target_node` matches the supplied target node then return references - /// where `Reference.target_node` is the source node. 
- pub fn find_inverse_references( - &self, - target_node: &NodeId, - reference_filter: Option<(T, bool)>, - ) -> Option> - where - T: Into + Clone, - { - if let Some(lookup_map) = self.referenced_by_map.get(target_node) { - // Iterate all nodes that reference this node, collecting their references - let mut result = Vec::with_capacity(16); - lookup_map.iter().for_each(|source_node| { - if let Some(references) = self.references_map.get(source_node) { - let references = references - .iter() - .filter(|r| r.target_node == *target_node) - .map(|r| Reference { - reference_type: r.reference_type.clone(), - target_node: source_node.clone(), - }) - .collect::>(); - let mut references = - self.filter_references_by_type(&references, &reference_filter); - if !references.is_empty() { - result.append(&mut references); - } - } - }); - if result.is_empty() { - None - } else { - Some(result) - } - } else { - None - } - } - - fn filter_references_by_type( - &self, - references: &[Reference], - reference_filter: &Option<(T, bool)>, - ) -> Vec - where - T: Into + Clone, - { - match reference_filter { - None => references.to_owned(), - Some((reference_type_id, include_subtypes)) => { - let reference_type_id = reference_type_id.clone().into(); - references - .iter() - .filter(|r| { - self.reference_type_matches( - &reference_type_id, - &r.reference_type, - *include_subtypes, - ) - }) - .cloned() - .collect::>() - } - } - } - - /// Find references optionally to and/or from the specified node id. The browse direction - /// indicates the desired direction, or both. The reference filter indicates if only references - /// of a certain type (including sub types) should be fetched. 
- pub fn find_references_by_direction( - &self, - node: &NodeId, - browse_direction: BrowseDirection, - reference_filter: Option<(T, bool)>, - ) -> (Vec, usize) - where - T: Into + Clone, - { - let mut references = Vec::new(); - let inverse_ref_idx: usize; - match browse_direction { - BrowseDirection::Forward => { - if let Some(mut forward_references) = self.find_references(node, reference_filter) { - references.append(&mut forward_references); - } - inverse_ref_idx = references.len(); - } - BrowseDirection::Inverse => { - inverse_ref_idx = 0; - if let Some(mut inverse_references) = - self.find_inverse_references(node, reference_filter) - { - references.append(&mut inverse_references); - } - } - BrowseDirection::Both => { - let reference_filter: Option<(NodeId, bool)> = - reference_filter.map(|(reference_type, include_subtypes)| { - (reference_type.into(), include_subtypes) - }); - if let Some(mut forward_references) = - self.find_references(node, reference_filter.clone()) - { - references.append(&mut forward_references); - } - inverse_ref_idx = references.len(); - if let Some(mut inverse_references) = - self.find_inverse_references(node, reference_filter) - { - references.append(&mut inverse_references); - } - } - BrowseDirection::Invalid => { - error!("BrowseDirection::Invalid passed to find_references_by_direction"); - inverse_ref_idx = 0; - } - } - (references, inverse_ref_idx) - } - - /// Test if a reference type matches another reference type which is potentially a subtype. - /// If `include_subtypes` is set to true, the function will test if the subttype - /// for a match. 
- pub fn reference_type_matches( - &self, - ref_type: &NodeId, - ref_subtype: &NodeId, - include_subtypes: bool, - ) -> bool { - if ref_type == ref_subtype { - true - } else if include_subtypes { - let has_subtype: NodeId = ReferenceTypeId::HasSubtype.into(); - - let mut stack = Vec::with_capacity(20); - stack.push(ref_type.clone()); - - // Search every type and subtype until exhausted - let mut found = false; - while let Some(current) = stack.pop() { - // Get all references to subtypes - if *ref_subtype == current { - found = true; - break; - } else if let Some(references) = self.references_map.get(¤t) { - let mut subtypes = references - .iter() - .filter(|r| r.reference_type == has_subtype) - .map(|r| r.target_node.clone()) - .collect::>(); - if subtypes.contains(ref_subtype) { - found = true; - break; - } - stack.append(&mut subtypes); - } - } - found - } else { - false - } - } - - pub fn get_type_id(&self, node: &NodeId) -> Option { - if let Some(references) = self.references_map.get(node) { - let has_type_definition_id = ReferenceTypeId::HasTypeDefinition.into(); - references - .iter() - .find(|r| r.reference_type == has_type_definition_id) - .map(|reference| reference.target_node.clone()) - } else { - None - } - } -} diff --git a/lib/src/server/address_space/relative_path.rs b/lib/src/server/address_space/relative_path.rs deleted file mode 100644 index b01383b71..000000000 --- a/lib/src/server/address_space/relative_path.rs +++ /dev/null @@ -1,176 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::collections::HashSet; - -use crate::types::{ - node_id::NodeId, - service_types::{RelativePath, RelativePathElement}, - status_code::StatusCode, - QualifiedName, -}; - -use super::{node::NodeType, AddressSpace}; - -/// Given a browse path consisting of browse names, walk nodes from the root until we find a single node (or not). 
-/// This function is a simplified use case for event filters and such like where a browse path -/// is defined as an array and doesn't need to be parsed out of a relative path. All nodes in the -/// path must be objects or variables. -pub(crate) fn find_node_from_browse_path<'a>( - address_space: &'a AddressSpace, - parent_node_id: &NodeId, - browse_path: &[QualifiedName], -) -> Result<&'a NodeType, StatusCode> { - if browse_path.is_empty() { - Err(StatusCode::BadNotFound) - } else { - // Each instance declaration in the path shall be an object or variable node. The final node in the - // path may be an object node; however, object nodes are only available for Events which are - // visible in the server's address space - let mut parent_node_id = parent_node_id.clone(); - for browse_name in browse_path { - if let Some(child_nodes) = address_space.find_hierarchical_references(&parent_node_id) { - let found_node_id = child_nodes.iter().find(|node_id| { - if let Some(node) = address_space.find_node(node_id) { - if node.as_node().browse_name() == *browse_name { - // Check that the node is an Object or Variable - matches!(node, NodeType::Object(_) | NodeType::Variable(_)) - } else { - false - } - } else { - false - } - }); - if let Some(found_node_id) = found_node_id { - parent_node_id = found_node_id.clone(); - } else { - //debug!( - // "Cannot find node under {} with browse_path of {:?}/1", - // parent_node_id, browse_path - //); - return Err(StatusCode::BadNotFound); - } - } else { - //debug!( - // "Cannot find node under {} with browse_path of {:?}/2", - // parent_node_id, browse_path - //); - return Err(StatusCode::BadNotFound); - } - } - Ok(address_space.find_node(&parent_node_id).unwrap()) - } -} - -/// Given a path as a string, find all the nodes that match against it. Note this function -/// uses a default path resolver based on common browse names. If you need something else use -/// `find_nodes_relative_path()` after you have created a relative path. 
-pub fn find_nodes_relative_path_simple( - address_space: &AddressSpace, - node_id: &NodeId, - relative_path: &str, -) -> Result, StatusCode> { - let relative_path = - RelativePath::from_str(relative_path, &RelativePathElement::default_node_resolver) - .map_err(|_| StatusCode::BadUnexpectedError)?; - find_nodes_relative_path(address_space, node_id, &relative_path) -} - -/// Given a `RelativePath`, find all the nodes that match against it. -pub fn find_nodes_relative_path( - address_space: &AddressSpace, - node_id: &NodeId, - relative_path: &RelativePath, -) -> Result, StatusCode> { - match address_space.find_node(node_id) { - None => { - trace!("find_nodes_relative_path cannot find node {}", node_id); - Err(StatusCode::BadNodeIdUnknown) - } - Some(_) => { - let elements = relative_path.elements.as_ref().unwrap(); - if elements.is_empty() { - warn!("find_nodes_relative_path elements are empty"); - Err(StatusCode::BadNothingToDo) - } else { - let mut matching_nodes = vec![node_id.clone()]; - let mut next_matching_nodes = Vec::with_capacity(100); - - // Traverse the relative path elements. 
Each time around, we will find the matching - // elements at that level using the next element - for element in elements.iter() { - if element.target_name.is_null() { - warn!("find_nodes_relative_path browse name is invalid (null)"); - return Err(StatusCode::BadBrowseNameInvalid); - } - - next_matching_nodes.clear(); - - matching_nodes.drain(..).for_each(|node_id| { - trace!("Following relative path on node {}", node_id); - // Iterate current set of nodes and put the results into next - if let Some(mut result) = - follow_relative_path(address_space, &node_id, element) - { - trace!(" Found matching nodes {:#?}", result); - next_matching_nodes.append(&mut result); - } else { - trace!(" Found no matching nodes"); - } - }); - if next_matching_nodes.is_empty() { - break; - } else { - matching_nodes.append(&mut next_matching_nodes); - } - } - - if matching_nodes.is_empty() { - warn!("find_nodes_relative_path bad no match"); - Err(StatusCode::BadNoMatch) - } else { - Ok(matching_nodes) - } - } - } - } -} - -fn follow_relative_path( - address_space: &AddressSpace, - node_id: &NodeId, - relative_path: &RelativePathElement, -) -> Option> { - let reference_filter = { - if let Ok(reference_type_id) = relative_path.reference_type_id.as_reference_type_id() { - Some((reference_type_id, relative_path.include_subtypes)) - } else { - None - } - }; - let references = if relative_path.is_inverse { - address_space.find_inverse_references(node_id, reference_filter) - } else { - address_space.find_references(node_id, reference_filter) - }; - if let Some(references) = references { - let compare_target_name = !relative_path.target_name.is_null(); - let mut result = Vec::with_capacity(references.len()); - for reference in &references { - if let Some(node) = address_space.find_node(&reference.target_node) { - let node = node.as_node(); - if !compare_target_name || node.browse_name() == relative_path.target_name { - result.push(reference.target_node.clone()); - } - } - } - // Vector may 
contain duplicates, so reduce those to a unique set - let result = result.into_iter().collect::>(); - // Now the result as a vec - Some(result.into_iter().collect()) - } else { - None - } -} diff --git a/lib/src/server/address_space/utils.rs b/lib/src/server/address_space/utils.rs new file mode 100644 index 000000000..7de88da7c --- /dev/null +++ b/lib/src/server/address_space/utils.rs @@ -0,0 +1,259 @@ +use crate::{ + server::node_manager::{ParsedReadValueId, ParsedWriteValue, RequestContext, TypeTree}, + types::{ + AttributeId, DataTypeId, DataValue, NumericRange, QualifiedName, StatusCode, + TimestampsToReturn, Variant, WriteMask, + }, +}; + +use super::{HasNodeId, NodeType, UserAccessLevel, Variable}; + +pub fn is_readable(context: &RequestContext, node: &NodeType) -> Result<(), StatusCode> { + if !user_access_level(context, node).contains(UserAccessLevel::CURRENT_READ) { + Err(StatusCode::BadUserAccessDenied) + } else { + Ok(()) + } +} + +pub fn is_writable( + context: &RequestContext, + node: &NodeType, + attribute_id: AttributeId, +) -> Result<(), StatusCode> { + if let (NodeType::Variable(_), AttributeId::Value) = (node, attribute_id) { + if !user_access_level(context, node).contains(UserAccessLevel::CURRENT_WRITE) { + return Err(StatusCode::BadUserAccessDenied); + } + + Ok(()) + } else { + let mask_value = match attribute_id { + // The default address space does not support modifying node class or node id, + // Custom node managers are allowed to. 
+ AttributeId::BrowseName => WriteMask::BROWSE_NAME, + AttributeId::DisplayName => WriteMask::DISPLAY_NAME, + AttributeId::Description => WriteMask::DESCRIPTION, + AttributeId::WriteMask => WriteMask::WRITE_MASK, + AttributeId::UserWriteMask => WriteMask::USER_WRITE_MASK, + AttributeId::IsAbstract => WriteMask::IS_ABSTRACT, + AttributeId::Symmetric => WriteMask::SYMMETRIC, + AttributeId::InverseName => WriteMask::INVERSE_NAME, + AttributeId::ContainsNoLoops => WriteMask::CONTAINS_NO_LOOPS, + AttributeId::EventNotifier => WriteMask::EVENT_NOTIFIER, + AttributeId::Value => WriteMask::VALUE_FOR_VARIABLE_TYPE, + AttributeId::DataType => WriteMask::DATA_TYPE, + AttributeId::ValueRank => WriteMask::VALUE_RANK, + AttributeId::ArrayDimensions => WriteMask::ARRAY_DIMENSIONS, + AttributeId::AccessLevel => WriteMask::ACCESS_LEVEL, + AttributeId::UserAccessLevel => WriteMask::USER_ACCESS_LEVEL, + AttributeId::MinimumSamplingInterval => WriteMask::MINIMUM_SAMPLING_INTERVAL, + AttributeId::Historizing => WriteMask::HISTORIZING, + AttributeId::Executable => WriteMask::EXECUTABLE, + AttributeId::UserExecutable => WriteMask::USER_EXECUTABLE, + AttributeId::DataTypeDefinition => WriteMask::DATA_TYPE_DEFINITION, + AttributeId::RolePermissions => WriteMask::ROLE_PERMISSIONS, + AttributeId::AccessRestrictions => WriteMask::ACCESS_RESTRICTIONS, + AttributeId::AccessLevelEx => WriteMask::ACCESS_LEVEL_EX, + _ => return Err(StatusCode::BadNotWritable), + }; + + let write_mask = node.as_node().write_mask(); + if write_mask.is_none() || write_mask.is_some_and(|wm| !wm.contains(mask_value)) { + return Err(StatusCode::BadNotWritable); + } + Ok(()) + } +} + +pub fn user_access_level(context: &RequestContext, node: &NodeType) -> UserAccessLevel { + let user_access_level = if let NodeType::Variable(ref node) = node { + node.user_access_level() + } else { + UserAccessLevel::CURRENT_READ + }; + context.authenticator.effective_user_access_level( + &context.token, + user_access_level, + 
&node.node_id(), + ) +} + +pub fn validate_node_read( + node: &NodeType, + context: &RequestContext, + node_to_read: &ParsedReadValueId, +) -> Result<(), StatusCode> { + is_readable(context, node)?; + + if node_to_read.attribute_id != AttributeId::Value + && node_to_read.index_range != NumericRange::None + { + return Err(StatusCode::BadIndexRangeDataMismatch); + } + + if !is_supported_data_encoding(&node_to_read.data_encoding) { + debug!( + "read_node_value result for read node id {}, attribute {:?} is invalid data encoding", + node_to_read.node_id, node_to_read.attribute_id + ); + return Err(StatusCode::BadDataEncodingInvalid); + } + + Ok(()) +} + +pub fn validate_value_to_write( + variable: &Variable, + value: &Variant, + type_tree: &TypeTree, +) -> Result<(), StatusCode> { + let value_rank = variable.value_rank(); + let node_data_type = variable.data_type(); + + if matches!(value, Variant::Empty) { + return Ok(()); + } + + if let Some(value_data_type) = value.scalar_data_type() { + // Value is scalar, check if the data type matches + let data_type_matches = type_tree.is_subtype_of(&value_data_type, &node_data_type); + if !data_type_matches { + // Check if the value to write is a byte string and the receiving node type a byte array. 
+ // This code is a mess just for some weird edge case in the spec that a write from + // a byte string to a byte array should succeed + match value { + Variant::ByteString(_) => { + if node_data_type == DataTypeId::Byte.into() { + match value_rank { + -2 | -3 | 1 => Ok(()), + _ => Err(StatusCode::BadTypeMismatch), + } + } else { + Err(StatusCode::BadTypeMismatch) + } + } + _ => Ok(()), + } + } else { + Ok(()) + } + } else if let Some(value_data_type) = value.array_data_type() { + // TODO check that value is array of same dimensions + if !type_tree.is_subtype_of(&value_data_type, &node_data_type) { + return Err(StatusCode::BadTypeMismatch); + } + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } +} + +pub fn validate_node_write( + node: &NodeType, + context: &RequestContext, + node_to_write: &ParsedWriteValue, + type_tree: &TypeTree, +) -> Result<(), StatusCode> { + is_writable(context, node, node_to_write.attribute_id)?; + + if node_to_write.attribute_id != AttributeId::Value && node_to_write.index_range.has_range() { + return Err(StatusCode::BadWriteNotSupported); + } + + let Some(value) = node_to_write.value.value.as_ref() else { + return Err(StatusCode::BadTypeMismatch); + }; + + // TODO: We should do type validation for every attribute, not just value. 
+ if let (NodeType::Variable(var), AttributeId::Value) = (node, node_to_write.attribute_id) { + validate_value_to_write(var, &value, type_tree)?; + } + + Ok(()) +} + +pub fn is_supported_data_encoding(data_encoding: &QualifiedName) -> bool { + if data_encoding.is_null() { + true + } else { + data_encoding.namespace_index == 0 && data_encoding.name.eq("Default Binary") + } +} + +pub fn read_node_value( + node: &NodeType, + context: &RequestContext, + node_to_read: &ParsedReadValueId, + max_age: f64, + timestamps_to_return: TimestampsToReturn, +) -> DataValue { + let mut result_value = DataValue::null(); + + let Some(attribute) = node.as_node().get_attribute_max_age( + timestamps_to_return, + node_to_read.attribute_id, + node_to_read.index_range.clone(), + &node_to_read.data_encoding, + max_age, + ) else { + result_value.status = Some(StatusCode::BadAttributeIdInvalid); + return result_value; + }; + + let value = if node_to_read.attribute_id == AttributeId::UserAccessLevel { + match attribute.value { + Some(Variant::Byte(val)) => { + let access_level = UserAccessLevel::from_bits_truncate(val); + let access_level = context.authenticator.effective_user_access_level( + &context.token, + access_level, + &node.node_id(), + ); + Some(Variant::from(access_level.bits())) + } + Some(v) => Some(v), + _ => None, + } + } else { + attribute.value + }; + + let value = if node_to_read.attribute_id == AttributeId::UserExecutable { + match value { + Some(Variant::Boolean(val)) => Some(Variant::from( + val && context + .authenticator + .is_user_executable(&context.token, &node.node_id()), + )), + r => r, + } + } else { + value + }; + + result_value.value = value; + result_value.status = attribute.status; + if matches!(node, NodeType::Variable(_)) && node_to_read.attribute_id == AttributeId::Value { + match timestamps_to_return { + TimestampsToReturn::Source => { + result_value.source_timestamp = attribute.source_timestamp; + result_value.source_picoseconds = 
attribute.source_picoseconds; + } + TimestampsToReturn::Server => { + result_value.server_timestamp = attribute.server_timestamp; + result_value.server_picoseconds = attribute.server_picoseconds; + } + TimestampsToReturn::Both => { + result_value.source_timestamp = attribute.source_timestamp; + result_value.source_picoseconds = attribute.source_picoseconds; + result_value.server_timestamp = attribute.server_timestamp; + result_value.server_picoseconds = attribute.server_picoseconds; + } + TimestampsToReturn::Neither | TimestampsToReturn::Invalid => { + // Nothing needs to change + } + } + } + result_value +} diff --git a/lib/src/server/address_space/variable.rs b/lib/src/server/address_space/variable.rs index 6921d6215..ce25fc560 100644 --- a/lib/src/server/address_space/variable.rs +++ b/lib/src/server/address_space/variable.rs @@ -5,20 +5,16 @@ //! Contains the implementation of `Variable` and `VariableBuilder`. use std::convert::{Into, TryFrom}; -use std::sync::Arc; -use crate::sync::*; use crate::types::service_types::VariableAttributes; - -use crate::server::{ - address_space::{ - base::Base, - node::{Node, NodeBase}, - AccessLevel, UserAccessLevel, - }, - callbacks::{AttributeGetter, AttributeSetter}, +use crate::types::{ + AttributeId, AttributesMask, DataTypeId, DataValue, DateTime, NumericRange, StatusCode, + TimestampsToReturn, Variant, }; +use super::base::Base; +use super::{AccessLevel, Node, NodeBase, UserAccessLevel}; + // This is a builder object for constructing variable nodes programmatically. node_builder_impl!(VariableBuilder, Variable); @@ -27,19 +23,13 @@ node_builder_impl_property_of!(VariableBuilder); impl VariableBuilder { /// Sets the value of the variable. - pub fn value(mut self, value: V) -> Self - where - V: Into, - { + pub fn value(mut self, value: impl Into) -> Self { let _ = self.node.set_value(NumericRange::None, value); self } /// Sets the data type of the variable. 
- pub fn data_type(mut self, data_type: T) -> Self - where - T: Into, - { + pub fn data_type(mut self, data_type: impl Into) -> Self { self.node.set_data_type(data_type); self } @@ -74,6 +64,12 @@ impl VariableBuilder { self } + /// Set the write mask for this variable. + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } + /// Makes the variable writable (by default it isn't) pub fn writable(mut self) -> Self { self.node @@ -111,18 +107,18 @@ impl VariableBuilder { /// Sets a value getter function for the variable. Whenever the value of a variable /// needs to be fetched (e.g. from a monitored item subscription), this trait will be called /// to get the value. - pub fn value_getter(mut self, getter: Arc>) -> Self { + /* pub fn value_getter(mut self, getter: Arc>) -> Self { self.node.set_value_getter(getter); self - } + } */ /// Sets a value setter function for the variable. Whenever the value of a variable is set via /// a service, this trait will be called to set the value. It is up to the implementation /// to decide what to do if that happens. - pub fn value_setter(mut self, setter: Arc>) -> Self { + /* pub fn value_setter(mut self, setter: Arc>) -> Self { self.node.set_value_setter(setter); self - } + } */ /// Add a reference to the variable indicating it has a type of another node. 
pub fn has_type_definition(self, type_id: T) -> Self @@ -155,19 +151,15 @@ impl VariableBuilder { #[derive(Derivative)] #[derivative(Debug)] pub struct Variable { - base: Base, - data_type: NodeId, - historizing: bool, - value_rank: i32, - value: DataValue, - access_level: u8, - user_access_level: u8, - array_dimensions: Option>, - minimum_sampling_interval: Option, - #[derivative(Debug = "ignore")] - value_setter: Option>>, - #[derivative(Debug = "ignore")] - value_getter: Option>>, + pub(super) base: Base, + pub(super) data_type: NodeId, + pub(super) historizing: bool, + pub(super) value_rank: i32, + pub(super) value: DataValue, + pub(super) access_level: u8, + pub(super) user_access_level: u8, + pub(super) array_dimensions: Option>, + pub(super) minimum_sampling_interval: Option, } impl Default for Variable { @@ -182,8 +174,6 @@ impl Default for Variable { user_access_level: AccessLevel::CURRENT_READ.bits(), array_dimensions: None, minimum_sampling_interval: None, - value_getter: None, - value_setter: None, } } } @@ -450,12 +440,12 @@ impl Variable { pub fn value( &self, - timestamps_to_return: TimestampsToReturn, + _timestamps_to_return: TimestampsToReturn, index_range: NumericRange, - data_encoding: &QualifiedName, + _data_encoding: &QualifiedName, max_age: f64, ) -> DataValue { - if let Some(ref value_getter) = self.value_getter { + /* if let Some(ref value_getter) = self.value_getter { let mut value_getter = value_getter.lock(); value_getter .get( @@ -472,35 +462,35 @@ impl Variable { Some(value) }) .unwrap_or_default() - } else { - let data_value = &self.value; - let mut result = DataValue { - server_picoseconds: data_value.server_picoseconds, - server_timestamp: data_value.server_timestamp, - source_picoseconds: data_value.source_picoseconds, - source_timestamp: data_value.source_timestamp, - value: None, - status: None, - }; - - // Get the value - if let Some(ref value) = data_value.value { - match value.range_of(index_range) { - Ok(value) => { - 
result.value = Some(value); - result.status = data_value.status; - } - Err(err) => { - result.status = Some(err); - } + } else { */ + let data_value = &self.value; + let mut result = DataValue { + server_picoseconds: data_value.server_picoseconds, + server_timestamp: data_value.server_timestamp, + source_picoseconds: data_value.source_picoseconds, + source_timestamp: data_value.source_timestamp, + value: None, + status: None, + }; + + // Get the value + if let Some(ref value) = data_value.value { + match value.range_of(index_range) { + Ok(value) => { + result.value = Some(value); + result.status = data_value.status; + } + Err(err) => { + result.status = Some(err); } } - if max_age > 0.0 && max_age <= i32::MAX as f64 { - // Update the server timestamp to now as a "best effort" attempt to get the latest value - result.server_timestamp = Some(DateTime::now()); - } - result } + if max_age > 0.0 && max_age <= i32::MAX as f64 { + // Update the server timestamp to now as a "best effort" attempt to get the latest value + result.server_timestamp = Some(DateTime::now()); + } + result + //} } /// Sets the variable's `Variant` value. The timestamps for the change are updated to now. 
@@ -525,7 +515,7 @@ impl Variable { }; // The value is set to the value getter - if let Some(ref value_setter) = self.value_setter { + /* if let Some(ref value_setter) = self.value_setter { let mut value_setter = value_setter.lock(); value_setter.set( &self.node_id(), @@ -533,14 +523,14 @@ impl Variable { index_range, value.into(), ) + } else { */ + let now = DateTime::now(); + if index_range.has_range() { + self.set_value_range(value, index_range, StatusCode::Good, &now, &now) } else { - let now = DateTime::now(); - if index_range.has_range() { - self.set_value_range(value, index_range, StatusCode::Good, &now, &now) - } else { - self.set_value_direct(value, StatusCode::Good, &now, &now) - } + self.set_value_direct(value, StatusCode::Good, &now, &now) } + //} } // Set a range value @@ -552,6 +542,14 @@ impl Variable { server_timestamp: &DateTime, source_timestamp: &DateTime, ) -> Result<(), StatusCode> { + if matches!(index_range, NumericRange::None) { + self.value.value = Some(value); + self.value.status = Some(status_code); + self.value.server_timestamp = Some(*server_timestamp); + self.value.source_timestamp = Some(*source_timestamp); + return Ok(()); + } + match self.value.value { Some(ref mut full_value) => { // Overwrite a partial section of the value @@ -583,15 +581,20 @@ impl Variable { Ok(()) } - /// Sets a getter function that will be called to get the value of this variable. - pub fn set_value_getter(&mut self, value_getter: Arc>) { - self.value_getter = Some(value_getter); + /// Sets the variable type's `DataValue` + pub fn set_data_value(&mut self, value: DataValue) { + self.value = value; } + /// Sets a getter function that will be called to get the value of this variable. + // pub fn set_value_getter(&mut self, value_getter: Arc>) { + // self.value_getter = Some(value_getter); + // } + /// Sets a setter function that will be called to set the value of this variable. 
- pub fn set_value_setter(&mut self, value_setter: Arc>) { - self.value_setter = Some(value_setter); - } + // pub fn set_value_setter(&mut self, value_setter: Arc>) { + // self.value_setter = Some(value_setter); + // } /// Gets the minimum sampling interval, if the attribute was set pub fn minimum_sampling_interval(&self) -> Option { diff --git a/lib/src/server/address_space/variable_type.rs b/lib/src/server/address_space/variable_type.rs index 5c720681d..66127a915 100644 --- a/lib/src/server/address_space/variable_type.rs +++ b/lib/src/server/address_space/variable_type.rs @@ -6,7 +6,13 @@ use std::convert::TryFrom; -use crate::types::service_types::VariableTypeAttributes; +use crate::{ + types::service_types::VariableTypeAttributes, + types::{ + AttributeId, AttributesMask, DataValue, NumericRange, StatusCode, TimestampsToReturn, + Variant, + }, +}; use super::{base::Base, node::Node, node::NodeBase}; @@ -15,15 +21,47 @@ node_builder_impl!(VariableTypeBuilder, VariableType); node_builder_impl_generates_event!(VariableTypeBuilder); node_builder_impl_subtype!(VariableTypeBuilder); +impl VariableTypeBuilder { + pub fn is_abstract(mut self, is_abstract: bool) -> Self { + self.node.set_is_abstract(is_abstract); + self + } + + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } + + pub fn data_type(mut self, data_type: impl Into) -> Self { + self.node.set_data_type(data_type); + self + } + + pub fn value(mut self, value: impl Into) -> Self { + self.node.set_value(value); + self + } + + pub fn array_dimensions(mut self, array_dimensions: &[u32]) -> Self { + self.node.set_array_dimensions(array_dimensions); + self + } + + pub fn value_rank(mut self, value_rank: i32) -> Self { + self.node.set_value_rank(value_rank); + self + } +} + /// A `VariableType` is a type of node within the `AddressSpace`. 
#[derive(Debug)] pub struct VariableType { - base: Base, - data_type: NodeId, - is_abstract: bool, - value_rank: i32, - value: Option, - array_dimensions: Option>, + pub(super) base: Base, + pub(super) data_type: NodeId, + pub(super) is_abstract: bool, + pub(super) value_rank: i32, + pub(super) value: Option, + pub(super) array_dimensions: Option>, } impl Default for VariableType { @@ -231,4 +269,9 @@ impl VariableType { { self.value = Some(DataValue::new_now(value)); } + + /// Sets the variable type's `DataValue` + pub fn set_data_value(&mut self, value: DataValue) { + self.value = Some(value); + } } diff --git a/lib/src/server/address_space/view.rs b/lib/src/server/address_space/view.rs index e50bd1682..55675a309 100644 --- a/lib/src/server/address_space/view.rs +++ b/lib/src/server/address_space/view.rs @@ -4,7 +4,13 @@ //! Contains the implementation of `View` and `ViewBuilder`. -use crate::types::service_types::ViewAttributes; +use crate::{ + types::service_types::ViewAttributes, + types::{ + AttributeId, AttributesMask, DataValue, NumericRange, StatusCode, TimestampsToReturn, + Variant, + }, +}; use super::{base::Base, node::Node, node::NodeBase, EventNotifier}; @@ -22,14 +28,19 @@ impl ViewBuilder { self.node.set_event_notifier(event_notifier); self } + + pub fn write_mask(mut self, write_mask: WriteMask) -> Self { + self.node.set_write_mask(write_mask); + self + } } /// A `View` is a type of node within the `AddressSpace`. 
#[derive(Debug)] pub struct View { - base: Base, - event_notifier: EventNotifier, - contains_no_loops: bool, + pub(super) base: Base, + pub(super) event_notifier: EventNotifier, + pub(super) contains_no_loops: bool, } impl Default for View { diff --git a/lib/src/server/authenticator.rs b/lib/src/server/authenticator.rs new file mode 100644 index 000000000..f9c95b352 --- /dev/null +++ b/lib/src/server/authenticator.rs @@ -0,0 +1,203 @@ +use async_trait::async_trait; + +use crate::{ + crypto::Thumbprint, + types::{MessageSecurityMode, NodeId, StatusCode}, +}; + +use super::{ + address_space::UserAccessLevel, config::ANONYMOUS_USER_TOKEN_ID, ServerEndpoint, + ServerUserToken, +}; +use std::{collections::BTreeMap, fmt::Debug}; + +/// Debug-safe wrapper around a password. +#[derive(Clone, PartialEq, Eq)] +pub struct Password(String); + +impl Debug for Password { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("Password").field(&"****").finish() + } +} + +impl Password { + pub fn new(password: String) -> Self { + Self(password) + } + + pub fn get(&self) -> &str { + &self.0 + } +} + +/// A unique identifier for a _user_. Distinct from a client/session, a user can +/// have multiple sessions at the same time, and is typically the value we use to +/// control access. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct UserToken(pub String); + +/// Key used to identify a user. +/// Goes beyond just the identity token, since some services require +/// information about the application URI and security mode as well. +#[derive(Debug, Clone)] +pub struct UserSecurityKey { + pub token: UserToken, + pub security_mode: MessageSecurityMode, + pub application_uri: String, +} + +impl UserToken { + pub fn is_anonymous(&self) -> bool { + self.0 == ANONYMOUS_USER_TOKEN_ID + } +} + +#[allow(unused)] +#[async_trait] +/// The AuthManager trait is used to let servers control access to the server. 
+/// It serves two main purposes: +/// +/// - It validates user credentials and returns a user token. Two clients with the +/// same user token are considered the _same_ user, and have some ability to interfere +/// with each other. +/// - It uses user tokens to check access levels. +/// +/// Note that the only async methods are the ones validating access tokens. This means +/// that these methods should load and store any information you need to check user +/// access level down the line. +/// +/// This is currently the only way to restrict access to core resources. For resources in +/// your own custom node managers you are free to use whatever access regime you want. +pub trait AuthManager: Send + Sync + 'static { + /// Validate whether an anonymous user is allowed to access the given endpoint. + /// This does not return a user token, all anonymous users share the same special token. + async fn authenticate_anonymous_token( + &self, + endpoint: &ServerEndpoint, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadIdentityTokenRejected) + } + + /// Validate the given username and password for `endpoint`. + /// This should return a user token associated with the user, for example the username itself. + async fn authenticate_username_identity_token( + &self, + endpoint: &ServerEndpoint, + username: &str, + password: &Password, + ) -> Result { + Err(StatusCode::BadIdentityTokenRejected) + } + + /// Validate the signing thumbprint for `endpoint`. + /// This should return a user token associated with the user. 
+ async fn authenticate_x509_identity_token( + &self, + endpoint: &ServerEndpoint, + signing_thumbprint: &Thumbprint, + ) -> Result { + Err(StatusCode::BadIdentityTokenRejected) + } + + /// Return the effective user access level for the given node ID + fn effective_user_access_level( + &self, + token: &UserToken, + user_access_level: UserAccessLevel, + node_id: &NodeId, + ) -> UserAccessLevel { + user_access_level + } + + fn is_user_executable(&self, token: &UserToken, method_id: &NodeId) -> bool { + true + } +} + +/// A simple authenticator that keeps a map of valid users in memory. +/// In production applications you will almost always want to create your own +/// custom authenticator. +pub struct DefaultAuthenticator { + users: BTreeMap, +} + +impl DefaultAuthenticator { + pub fn new(users: BTreeMap) -> Self { + Self { users } + } +} + +#[async_trait] +impl AuthManager for DefaultAuthenticator { + async fn authenticate_anonymous_token( + &self, + endpoint: &ServerEndpoint, + ) -> Result<(), StatusCode> { + if !endpoint.supports_anonymous() { + error!( + "Endpoint \"{}\" does not support anonymous authentication", + endpoint.path + ); + return Err(StatusCode::BadIdentityTokenRejected); + } + Ok(()) + } + + async fn authenticate_username_identity_token( + &self, + endpoint: &ServerEndpoint, + username: &str, + password: &Password, + ) -> Result { + let token_password = password.get(); + for user_token_id in &endpoint.user_token_ids { + if let Some(server_user_token) = self.users.get(user_token_id) { + if server_user_token.is_user_pass() && server_user_token.user == username.as_ref() { + // test for empty password + let valid = if server_user_token.pass.is_none() { + // Empty password for user + token_password.is_empty() + } else { + // Password compared as UTF-8 bytes + let server_password = server_user_token.pass.as_ref().unwrap().as_bytes(); + server_password == token_password.as_bytes() + }; + if !valid { + error!( + "Cannot authenticate \"{}\", password is 
invalid", + server_user_token.user + ); + return Err(StatusCode::BadUserAccessDenied); + } else { + return Ok(UserToken(user_token_id.clone())); + } + } + } + } + error!( + "Cannot authenticate \"{}\", user not found for endpoint", + username + ); + Err(StatusCode::BadUserAccessDenied) + } + + async fn authenticate_x509_identity_token( + &self, + endpoint: &ServerEndpoint, + signing_thumbprint: &Thumbprint, + ) -> Result { + // Check the endpoint to see if this token is supported + for user_token_id in &endpoint.user_token_ids { + if let Some(server_user_token) = self.users.get(user_token_id) { + if let Some(ref user_thumbprint) = server_user_token.thumbprint { + // The signing cert matches a user's identity, so it is valid + if user_thumbprint == signing_thumbprint { + return Ok(UserToken(user_token_id.clone())); + } + } + } + } + Err(StatusCode::BadIdentityTokenInvalid) + } +} diff --git a/lib/src/server/benches/address_space.rs b/lib/src/server/benches/address_space.rs deleted file mode 100644 index 09c96d5f5..000000000 --- a/lib/src/server/benches/address_space.rs +++ /dev/null @@ -1,38 +0,0 @@ -#[macro_use] -extern crate criterion; - -use criterion::Criterion; - -use crate::server::address_space::{address_space::AddressSpace, references::References}; -use crate::types::node_ids::ReferenceTypeId; - -fn populate_address_space() { - let _address_space = AddressSpace::new(); -} - -fn address_space_benchmark(c: &mut Criterion) { - // This benchmark tests how long it takes to populate the address space - c.bench_function("address_space", |b| b.iter(|| populate_address_space())); -} - -fn reference_type_matches(references: &References) { - let r1 = ReferenceTypeId::References.into(); - let r2 = ReferenceTypeId::AlwaysGeneratesEvent.into(); - let r3 = ReferenceTypeId::HierarchicalReferences.into(); - // AlwaysGeneratesEvent is a subtype of References via NonHierarchicalReferences - assert!(references.reference_type_matches(&r1, &r2, true)); - // AlwaysGeneratesEvent 
is not a subtype of HierarchicalReferences - assert!(!references.reference_type_matches(&r3, &r2, true)); -} - -fn reference_type_benchmark(c: &mut Criterion) { - // This bench mark test how long it takes to test if one reference type is a subtype of another - c.bench_function("reference_type_benchmark", |b| { - let address_space = AddressSpace::new(); - let references = address_space.references(); - b.iter(|| reference_type_matches(references)); - }); -} - -criterion_group!(benches, address_space_benchmark, reference_type_benchmark); -criterion_main!(benches); diff --git a/lib/src/server/builder.rs b/lib/src/server/builder.rs index 72e77c5e1..d1f9af491 100644 --- a/lib/src/server/builder.rs +++ b/lib/src/server/builder.rs @@ -1,53 +1,66 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock +use std::{path::PathBuf, sync::Arc}; -use std::path::PathBuf; +use tokio_util::sync::CancellationToken; -use crate::core::config::Config; +use crate::{ + core::config::Config, crypto::SecurityPolicy, server::constants, types::MessageSecurityMode, +}; use super::{ - config::{ServerConfig, ServerEndpoint, ServerUserToken, ANONYMOUS_USER_TOKEN_ID}, - constants, - server::Server, + authenticator::AuthManager, + node_manager::{ + memory::{ + CoreNodeManagerBuilder, DiagnosticsNodeManagerBuilder, InMemoryNodeManagerBuilder, + }, + NodeManagerBuilder, + }, + Limits, Server, ServerConfig, ServerEndpoint, ServerHandle, ServerUserToken, + ANONYMOUS_USER_TOKEN_ID, }; -const DEFAULT_ENDPOINT_PATH: &str = "/"; - -/// The `ServerBuilder` is a builder for producing a [`Server`]. It is an alternative to constructing -/// a [`ServerConfig`] from file or from scratch. 
-/// -/// [`Server`]: ../server/struct.Server.html -/// [`ServerConfig`]: ../config/struct.ServerConfig.html pub struct ServerBuilder { - config: ServerConfig, + pub(crate) config: ServerConfig, + pub(crate) node_managers: Vec>, + pub(crate) authenticator: Option>, + pub(crate) token: CancellationToken, +} + +impl Default for ServerBuilder { + fn default() -> Self { + let builder = Self { + config: Default::default(), + node_managers: Default::default(), + authenticator: None, + token: CancellationToken::new(), + }; + builder + .with_node_manager(InMemoryNodeManagerBuilder::new(CoreNodeManagerBuilder)) + .with_node_manager(DiagnosticsNodeManagerBuilder) + } } impl ServerBuilder { pub fn new() -> Self { - Self { - config: ServerConfig::default(), - } + Self::default() } - /// Reads the config in as a starting point + /// Create a server builder from a config object. pub fn from_config(config: ServerConfig) -> Self { - Self { config } + Self { + config, + ..Default::default() + } } - /// Creates a simple endpoint that accepts anonymous connections - pub fn new_anonymous(application_name: T) -> Self - where - T: Into, - { - let user_token_ids = vec![ANONYMOUS_USER_TOKEN_ID.to_string()]; + /// Creates a simple endpoint that accepts anonymous connections. + pub fn new_anonymous(application_name: impl Into) -> Self { Self::new() .application_name(application_name) - .endpoint( + .add_endpoint( "none", - ServerEndpoint::new_none(DEFAULT_ENDPOINT_PATH, &user_token_ids), + ServerEndpoint::new_none("/", &[ANONYMOUS_USER_TOKEN_ID.to_string()]), ) - .discovery_urls(vec![DEFAULT_ENDPOINT_PATH.into()]) + .discovery_urls(vec!["/".to_owned()]) } /// Creates and yields a builder which is configured with the sample server configuration. @@ -55,17 +68,12 @@ impl ServerBuilder { pub fn new_sample() -> Self { warn!("Sample configuration is for testing purposes only. 
Use a proper configuration in your production environment"); - let path = DEFAULT_ENDPOINT_PATH; - - let user_token_ids = [ + let user_token_ids = vec![ + ANONYMOUS_USER_TOKEN_ID, "sample_password_user", "sample_x509_user", - ANONYMOUS_USER_TOKEN_ID, - ] - .iter() - .map(|u| u.to_string()) - .collect::>(); - + ]; + let endpoint_path = "/"; Self::new() .application_name("OPC UA Sample Server") .application_uri("urn:OPC UA Sample Server") @@ -74,318 +82,422 @@ impl ServerBuilder { .certificate_path("own/cert.der") .private_key_path("private/private.pem") .pki_dir("./pki") - .discovery_server_url(Some(constants::DEFAULT_DISCOVERY_SERVER_URL.to_string())) - .user_token( + .discovery_server_url(constants::DEFAULT_DISCOVERY_SERVER_URL) + .add_user_token( "sample_password_user", ServerUserToken { user: "sample1".to_string(), pass: Some("sample1pwd".to_string()), - x509: None, - thumbprint: None, + ..Default::default() }, ) - .user_token( + .add_user_token( "sample_x509_user", ServerUserToken { user: "sample_x509".to_string(), - pass: None, x509: Some("./users/sample-x509.der".to_string()), - thumbprint: None, + ..Default::default() }, ) - .user_token( - "unused_user", - ServerUserToken { - user: "unused".to_string(), - pass: Some("unused1".to_string()), - x509: None, - thumbprint: None, - }, + .add_endpoint( + "none", + ( + endpoint_path, + SecurityPolicy::None, + MessageSecurityMode::None, + &user_token_ids as &[&str], + ), ) - .endpoints(vec![ - ("none", ServerEndpoint::new_none(path, &user_token_ids)), + .add_endpoint( + "basic128rsa15_sign", ( - "basic128rsa15_sign", - ServerEndpoint::new_basic128rsa15_sign(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "basic128rsa15_sign_encrypt", ( - "basic128rsa15_sign_encrypt", - ServerEndpoint::new_basic128rsa15_sign_encrypt(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Basic128Rsa15, + 
MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "basic256_sign", ( - "aes128-sha256-rsaoaep_sign", - ServerEndpoint::new_aes128_sha256_rsaoaep_sign(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Basic256, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "basic256_sign_encrypt", ( - "aes128-sha256-rsaoaep_sign_encrypt", - ServerEndpoint::new_aes128_sha256_rsaoaep_sign_encrypt(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "basic256sha256_sign", ( - "aes256-sha256-rsapss_sign", - ServerEndpoint::new_aes256_sha256_rsapss_sign(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "basic256sha256_sign_encrypt", ( - "aes256-sha256-rsapss_sign_encrypt", - ServerEndpoint::new_aes256_sha256_rsapss_sign_encrypt(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "endpoint_aes128sha256rsaoaep_sign", ( - "basic256_sign", - ServerEndpoint::new_basic256_sign(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "endpoint_aes128sha256rsaoaep_sign_encrypt", ( - "basic256_sign_encrypt", - ServerEndpoint::new_basic256_sign_encrypt(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], ), + ) + .add_endpoint( + "endpoint_aes256sha256rsapss_sign", ( - "basic256sha256_sign", - ServerEndpoint::new_basic256sha256_sign(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], ), + ) 
+ .add_endpoint( + "endpoint_aes256sha256rsapss_sign_encrypt", ( - "basic256sha256_sign_encrypt", - ServerEndpoint::new_basic256sha256_sign_encrypt(path, &user_token_ids), + endpoint_path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], ), - ("no_access", ServerEndpoint::new_none("/noaccess", &[])), - ]) - .discovery_urls(vec![DEFAULT_ENDPOINT_PATH.into()]) + ) + } + + /// Get the currently configured config. + pub fn config(&self) -> &ServerConfig { + &self.config } - /// Yields a [`Server`] from the values set by the builder. If the builder is not in a valid state - /// it will return `None`. + /// Load config from a local file. + /// Will panic if this fails, if you prefer to propagate errors use /// - /// [`Server`]: ../server/struct.Server.html - pub fn server(self) -> Option { - if self.is_valid() { - Some(Server::new(self.config())) - } else { - None - } + /// `with_config(ServerConfig::load(&"my_config.conf".into())?)` + pub fn with_config_from(mut self, path: impl Into) -> Self { + self.config = ServerConfig::load(&path.into()).expect("Failed to load config"); + self + } + + /// Set the entire config object, which may be loaded from somewhere else. + pub fn with_config(mut self, config: ServerConfig) -> Self { + self.config = config; + self } - /// Yields a [`ServerConfig`] from the values set by the builder. + /// Get a mutable reference to the currently configured config. + pub fn config_mut(&mut self) -> &mut ServerConfig { + &mut self.config + } + + /// Get a mutable reference to the limits object. + pub fn limits_mut(&mut self) -> &mut Limits { + &mut self.config.limits + } + + /// Add a node manager builder to the list of node managers. + /// Once the server is created you can retrieve it from + /// `handle.node_managers()`. This allows node managers to contain + /// core server types without late initialization. 
+ pub fn with_node_manager(mut self, node_manager: impl NodeManagerBuilder + 'static) -> Self { + self.node_managers.push(Box::new(node_manager)); + self + } + + /// Clear all node managers. /// - /// [`ServerConfig`]: ../config/struct.ServerConfig.html - pub fn config(self) -> ServerConfig { - self.config + /// Warning: your server will not be compliant without presenting the core namespace. + /// If you remove the core node manager you must implement the core namespace yourself. + pub fn without_node_managers(mut self) -> Self { + self.node_managers.clear(); + self } - /// Test if the builder can yield a server with the configuration supplied. - pub fn is_valid(&self) -> bool { - self.config.is_valid() + /// Set a custom authenticator. + pub fn with_authenticator(mut self, authenticator: Arc) -> Self { + self.authenticator = Some(authenticator); + self } - /// Sets the application name. - pub fn application_name(mut self, application_name: T) -> Self - where - T: Into, - { + /// Server application name. + pub fn application_name(mut self, application_name: impl Into) -> Self { self.config.application_name = application_name.into(); self } - /// Sets the application uri - pub fn application_uri(mut self, application_uri: T) -> Self - where - T: Into, - { + /// Server application URI. + pub fn application_uri(mut self, application_uri: impl Into) -> Self { self.config.application_uri = application_uri.into(); self } - /// Sets the product uri. - pub fn product_uri(mut self, product_uri: T) -> Self - where - T: Into, - { + /// Server product URI. + pub fn product_uri(mut self, product_uri: impl Into) -> Self { self.config.product_uri = product_uri.into(); self } - /// Sets whether the server should generate its own key pair if there is none found in the pki - /// directory. + /// Autocreates public / private keypair if they do not exist. 
pub fn create_sample_keypair(mut self, create_sample_keypair: bool) -> Self { self.config.create_sample_keypair = create_sample_keypair; self } - /// Sets a custom server certificate path. The path is required to be provided as a partial - /// path relative to the PKI directory. If set, this path will be used to read the server - /// certificate from disk. The certificate can be in either the .der or .pem format. - pub fn certificate_path(mut self, certificate_path: T) -> Self - where - T: Into, - { + /// Path to a custom certificate, to be used instead of the default .der certificate + pub fn certificate_path(mut self, certificate_path: impl Into) -> Self { self.config.certificate_path = Some(certificate_path.into()); self } - /// Sets a custom private key path. The path is required to be provided as a partial path - /// relative to the PKI directory. If set, this path will be used to read the private key - /// from disk. - pub fn private_key_path(mut self, private_key_path: T) -> Self - where - T: Into, - { + /// Path to a custom private key, used instead of the default private key. + pub fn private_key_path(mut self, private_key_path: impl Into) -> Self { self.config.private_key_path = Some(private_key_path.into()); self } - /// Sets the pki directory where server's own key pair is stored and where `/trusted` and - /// `/rejected` server certificates are stored. - pub fn pki_dir(mut self, pki_dir: T) -> Self - where - T: Into, - { - self.config.pki_dir = pki_dir.into(); + /// Auto trust client certificates. Typically should only be used for testing + /// or samples, as it is potentially unsafe. + pub fn trust_client_certs(mut self, trust_client_certs: bool) -> Self { + self.config.certificate_validation.trust_client_certs = trust_client_certs; self } - /// Adds an endpoint to the list of endpoints the server knows of. 
- pub fn endpoint(mut self, endpoint_id: T, endpoint: ServerEndpoint) -> Self - where - T: Into, - { - self.config.endpoints.insert(endpoint_id.into(), endpoint); + /// Validate the valid from/to fields of a certificate. + pub fn check_cert_time(mut self, check_cert_time: bool) -> Self { + self.config.certificate_validation.check_time = check_cert_time; self } - /// Adds multiple endpoints to the list of endpoints the server knows of. - pub fn endpoints(mut self, endpoints: Vec<(T, ServerEndpoint)>) -> Self - where - T: Into, - { - for e in endpoints { - self.config.endpoints.insert(e.0.into(), e.1); - } + /// PKI folder, either absolute or relative to executable. + pub fn pki_dir(mut self, pki_dir: impl Into) -> Self { + self.config.pki_dir = pki_dir.into(); self } - /// Adds a user token to the server. - pub fn user_token(mut self, user_token_id: T, user_token: ServerUserToken) -> Self - where - T: Into, - { - self.config - .user_tokens - .insert(user_token_id.into(), user_token); + /// URL to a discovery server. Adding this makes the server attempt to register + /// itself with this discovery server. + pub fn discovery_server_url(mut self, url: impl Into) -> Self { + self.config.discovery_server_url = Some(url.into()); self } - /// Sets the discovery server url that this server shall attempt to register itself with. - pub fn discovery_server_url(mut self, discovery_server_url: Option) -> Self { - self.config.discovery_server_url = discovery_server_url; + /// Timeout for new connections to send a `HELLO` message, in seconds. + /// After this timeout expires without a valid hello message, the connection + /// is closed. + pub fn hello_timeout(mut self, timeout: u32) -> Self { + self.config.tcp_config.hello_timeout = timeout; self } - /// Sets the hostname and port to listen on - pub fn host_and_port(mut self, host: T, port: u16) -> Self - where - T: Into, - { + /// Hostname to listen to incoming TCP connections on. 
+ pub fn host(mut self, host: impl Into) -> Self { self.config.tcp_config.host = host.into(); + self + } + + /// Port number used to listen for incoming TCP connections. + pub fn port(mut self, port: u16) -> Self { self.config.tcp_config.port = port; self } - /// Discovery endpoint urls - the urls of this server used by clients to get endpoints. - /// If the url is relative, e.g. "/" then the code will make a url for you using the port/host - /// settings as they are at the time this function is executed. + /// General server limits. + pub fn limits(mut self, limits: Limits) -> Self { + self.config.limits = limits; + self + } + + /// Supported locale IDs. + pub fn locale_ids(mut self, locale_ids: Vec) -> Self { + self.config.locale_ids = locale_ids; + self + } + + /// Add a user to the list of known user tokens. Used by the default + /// authenticator, you can use a custom one instead. + pub fn add_user_token(mut self, key: impl Into, token: ServerUserToken) -> Self { + self.config.user_tokens.insert(key.into(), token); + self + } + + /// List of discovery endpoint URLs which may or may not be the same as the service + /// endpoints. pub fn discovery_urls(mut self, discovery_urls: Vec) -> Self { - self.config.discovery_urls = discovery_urls - .iter() - .map(|discovery_url| { - if discovery_url.starts_with('/') { - // Turn into an opc url - format!( - "opc.tcp://{}:{}/", - self.config.tcp_config.host, self.config.tcp_config.port - ) - } else { - discovery_url.clone() - } - }) - .collect(); + self.config.discovery_urls = discovery_urls; self } - /// Set the maximum number of subscriptions in a session - pub fn max_subscriptions(mut self, max_subscriptions: usize) -> Self { - self.config.limits.max_subscriptions = max_subscriptions; + /// Default endpoint ID. 
+ pub fn default_endpoint(mut self, endpoint_id: impl Into) -> Self { + self.config.default_endpoint = Some(endpoint_id.into()); self } - /// Set the maximum number of monitored items per subscription - pub fn max_monitored_items_per_sub(mut self, max_monitored_items_per_sub: usize) -> Self { - self.config.limits.max_monitored_items_per_sub = max_monitored_items_per_sub; + /// Add an endpoint to the list of endpoints supported by the server. + pub fn add_endpoint( + mut self, + id: impl Into, + endpoint: impl Into, + ) -> Self { + self.config.endpoints.insert(id.into(), endpoint.into()); self } - /// Set the max array length in elements + /// Interval in milliseconds between each time the subscriptions are polled. + pub fn subscription_poll_interval_ms(mut self, interval: u64) -> Self { + self.config.subscription_poll_interval_ms = interval; + self + } + + /// Default publish request timeout. + pub fn publish_timeout_default_ms(mut self, timeout: u64) -> Self { + self.config.publish_timeout_default_ms = timeout; + self + } + + /// Max message timeout for non-publish requests. + /// Will not be applied for requests that are handled synchronously. + /// Set to 0 for no timeout, meaning that a timeout will only be applied if + /// the client requests one. + /// If this is greater than zero and the client requests a timeout of 0, + /// this will be used. + pub fn max_timeout_ms(mut self, timeout: u32) -> Self { + self.config.max_timeout_ms = timeout; + self + } + + /// Maximum lifetime of secure channel tokens. The client will request a number, + /// this just sets an upper limit on that value. + /// Note that there is no lower limit, if a client sets an expiry of 0, + /// we will just instantly time out. + pub fn max_secure_channel_token_lifetime_ms(mut self, lifetime: u32) -> Self { + self.config.max_secure_channel_token_lifetime_ms = lifetime; + self + } + + /// Try to construct a server from this builder, may fail if the configuration + /// is invalid. 
+ pub fn build(self) -> Result<(Server, ServerHandle), String> { + Server::new_from_builder(self) + } + + /// Maximum length of arrays when encoding or decoding messages. pub fn max_array_length(mut self, max_array_length: usize) -> Self { self.config.limits.max_array_length = max_array_length; self } - /// Set the max string length in characters, i.e. if you set max to 1000 characters, then with - /// UTF-8 encoding potentially that's 4000 bytes. + /// Maximum length of strings in bytes when encoding or decoding messages. pub fn max_string_length(mut self, max_string_length: usize) -> Self { self.config.limits.max_string_length = max_string_length; self } - /// Set the max bytestring length in bytes + /// Maximum byte string length in bytes when encoding or decoding messages. pub fn max_byte_string_length(mut self, max_byte_string_length: usize) -> Self { self.config.limits.max_byte_string_length = max_byte_string_length; self } - /// Set the maximum message size + /// Maximum allowed message size in bytes. pub fn max_message_size(mut self, max_message_size: usize) -> Self { self.config.limits.max_message_size = max_message_size; self } - /// Set the max chunk count + /// Maximum allowed chunk count per message. pub fn max_chunk_count(mut self, max_chunk_count: usize) -> Self { self.config.limits.max_chunk_count = max_chunk_count; self } - // Set the send buffer size + /// Maximum send buffer size, can be negotiated lower with clients. pub fn send_buffer_size(mut self, send_buffer_size: usize) -> Self { self.config.limits.send_buffer_size = send_buffer_size; self } - // Set the receive buffer size + /// Maximum receive buffer size, can be negotiated lower with clients. pub fn receive_buffer_size(mut self, receive_buffer_size: usize) -> Self { self.config.limits.receive_buffer_size = receive_buffer_size; self } - /// Sets the server to automatically trust client certs. 
This subverts the - /// authentication during handshake, so only do this if you understand the risks. - pub fn trust_client_certs(mut self) -> Self { - self.config.certificate_validation.trust_client_certs = true; + /// Maximum number of browse continuation points per session. + pub fn max_browse_continuation_points(mut self, max_browse_continuation_points: usize) -> Self { + self.config.limits.max_browse_continuation_points = max_browse_continuation_points; + self + } + + /// Maximum number of history continuation points per session. + pub fn max_history_continuation_points( + mut self, + max_history_continuation_points: usize, + ) -> Self { + self.config.limits.max_history_continuation_points = max_history_continuation_points; + self + } + + /// Maximum number of query continuation points per session. + pub fn max_query_continuation_points(mut self, max_query_continuation_points: usize) -> Self { + self.config.limits.max_query_continuation_points = max_query_continuation_points; self } - /// Set that clients can modify the address space, i.e. they can add or remove nodes through - /// the node management service. By default, they cannot. - pub fn clients_can_modify_address_space(mut self) -> Self { - self.config.limits.clients_can_modify_address_space = true; + /// Maximum number of active sessions. + pub fn max_sessions(mut self, max_sessions: usize) -> Self { + self.config.limits.max_sessions = max_sessions; self } - /// Configures the server to use a single-threaded executor. The default executor uses a - /// thread pool with a worker thread for each CPU core available on the system. - pub fn single_threaded_executor(mut self) -> Self { - self.config.performance.single_threaded_executor = true; + /// Maximum time in milliseconds a session can be inactive before it is timed out and removed. + /// The client can request a lower value than this. 
+ pub fn max_session_timeout_ms(mut self, max_session_timeout_ms: u64) -> Self { + self.config.max_session_timeout_ms = max_session_timeout_ms; self } - /// Configures the server to use a multi-threaded executor. - pub fn multi_threaded_executor(mut self) -> Self { - self.config.performance.single_threaded_executor = false; + /// Set the cancellation token used by the server. You only need to + /// set the token if you need to use a token from somewhere else to cancel, + /// otherwise you can get the token after building the server with + /// `handle.token()`. + pub fn token(mut self, token: CancellationToken) -> Self { + self.token = token; self } } diff --git a/lib/src/server/callbacks.rs b/lib/src/server/callbacks.rs deleted file mode 100644 index 01836187e..000000000 --- a/lib/src/server/callbacks.rs +++ /dev/null @@ -1,101 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Callbacks that a server implementation may register with the library - -use std::sync::Arc; - -use crate::sync::*; -use crate::types::{ - service_types::{CallMethodRequest, CallMethodResult, TimestampsToReturn}, - status_code::StatusCode, - AttributeId, DataValue, NodeId, NumericRange, QualifiedName, -}; - -use super::session::{Session, SessionManager}; - -/// An attribute getter trait is used to obtain the data value associated with the particular attribute id -/// This allows server implementations to supply a value on demand, usually in response to a polling action -/// such as a monitored item in a subscription. -/// -/// `node_id` is the node to which the node belongs -/// `attribute_id` is the attribute of the node to fetch a value for -/// -/// Use `max_age` according to the OPC UA Part 4, Table 52 specification to determine how to return -/// a value: -/// -/// * 0 = a new value -/// * time in ms for a value less than the specified age -/// * i32::max() or higher to fetch a cached value. 
-/// -pub trait AttributeGetter { - /// Returns a data value of the specified attribute or none. - fn get( - &mut self, - node_id: &NodeId, - timestamps_to_return: TimestampsToReturn, - attribute_id: AttributeId, - index_range: NumericRange, - data_encoding: &QualifiedName, - max_age: f64, - ) -> Result, StatusCode>; -} - -// An attribute setter. Sets the value on the specified attribute -pub trait AttributeSetter { - /// Sets the attribute on the specified node - fn set( - &mut self, - node_id: &NodeId, - attribute_id: AttributeId, - index_range: NumericRange, - data_value: DataValue, - ) -> Result<(), StatusCode>; -} - -/// Called by RegisterNodes service -pub trait RegisterNodes { - /// Called when a client calls the RegisterNodes service. This implementation should return a list - /// of the same size and order containing node ids corresponding to the input, or aliases. The implementation - /// should return `BadNodeIdInvalid` if any of the node ids in the input are invalid. - /// - /// The call is also given the session that the request was made on. The implementation should - /// NOT hold a strong reference to this session, but it can make a weak reference if it desires. - /// - /// There is no guarantee that the corresponding `OnUnregisterNodes` will be called by the client, - /// therefore use the weak session references and a periodic check to perform any housekeeping. - fn register_nodes( - &mut self, - session: Arc>, - nodes_to_register: &[NodeId], - ) -> Result, StatusCode>; -} - -/// Called by UnregisterNodes service -pub trait UnregisterNodes { - /// Called when a client calls the UnregisterNodes service. See `OnRegisterNodes` trait for more - /// information. A client may not call this function, e.g. if connection breaks so do not - /// count on receiving this to perform any housekeeping. - /// - /// The function should not validate the nodes in the request and should just ignore any - /// unregistered nodes. 
- fn unregister_nodes( - &mut self, - session: Arc>, - nodes_to_unregister: &[NodeId], - ) -> Result<(), StatusCode>; -} - -/// Called by the Method service when it invokes a method -pub trait Method { - /// A method is registered via the address space to a method id and optionally an object id. - /// When a client sends a CallRequest / CallMethod request, the registered object will - /// be invoked to handle the call. - fn call( - &mut self, - session_id: &NodeId, - session_manager: Arc>, - request: &CallMethodRequest, - ) -> Result; -} diff --git a/lib/src/server/comms/mod.rs b/lib/src/server/comms/mod.rs deleted file mode 100644 index ffc30dbbb..000000000 --- a/lib/src/server/comms/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides communication services for the server such as the transport layer and secure -//! channel implementation - -mod secure_channel_service; - -pub mod tcp_transport; -pub mod transport; diff --git a/lib/src/server/comms/secure_channel_service.rs b/lib/src/server/comms/secure_channel_service.rs deleted file mode 100644 index 4de0f84fb..000000000 --- a/lib/src/server/comms/secure_channel_service.rs +++ /dev/null @@ -1,199 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::result::Result; - -use crate::core::{comms::prelude::*, supported_message::SupportedMessage}; - -use crate::crypto::SecurityPolicy; -use crate::types::{status_code::StatusCode, *}; - -struct SecureChannelState { - // Issued flag - issued: bool, - // Renew count, debugging - renew_count: usize, - // Last secure channel id - last_secure_channel_id: u32, - /// Last token id number - last_token_id: u32, -} - -impl SecureChannelState { - pub fn new() -> SecureChannelState { - SecureChannelState { - last_secure_channel_id: 0, - issued: false, - renew_count: 0, - last_token_id: 0, - } - } - - pub fn 
create_secure_channel_id(&mut self) -> u32 { - self.last_secure_channel_id += 1; - self.last_secure_channel_id - } - - pub fn create_token_id(&mut self) -> u32 { - self.last_token_id += 1; - self.last_token_id - } -} - -pub struct SecureChannelService { - // Secure channel info for the session - secure_channel_state: SecureChannelState, -} - -impl SecureChannelService { - pub fn new() -> SecureChannelService { - SecureChannelService { - secure_channel_state: SecureChannelState::new(), - } - } - - pub fn open_secure_channel( - &mut self, - secure_channel: &mut SecureChannel, - security_header: &SecurityHeader, - client_protocol_version: u32, - message: &SupportedMessage, - ) -> Result { - let request = match message { - SupportedMessage::OpenSecureChannelRequest(request) => { - trace!("Got secure channel request {:?}", request); - request - } - _ => { - error!( - "message is not an open secure channel request, got {:?}", - message - ); - return Err(StatusCode::BadUnexpectedError); - } - }; - - let security_header = match security_header { - SecurityHeader::Asymmetric(security_header) => security_header, - _ => { - error!("Secure channel request message does not have asymmetric security header"); - return Err(StatusCode::BadUnexpectedError); - } - }; - - // Must compare protocol version to the one from HELLO - if request.client_protocol_version != client_protocol_version { - error!( - "Client sent a different protocol version than it did in the HELLO - {} vs {}", - request.client_protocol_version, client_protocol_version - ); - return Ok(ServiceFault::new( - &request.request_header, - StatusCode::BadProtocolVersionUnsupported, - ) - .into()); - } - - // Test the request type - let secure_channel_id = match request.request_type { - SecurityTokenRequestType::Issue => { - trace!("Request type == Issue"); - // check to see if renew has been called before or not - if self.secure_channel_state.renew_count > 0 { - error!("Asked to issue token on session that has called 
renew before"); - } - self.secure_channel_state.create_secure_channel_id() - } - SecurityTokenRequestType::Renew => { - trace!("Request type == Renew"); - - // Check for a duplicate nonce. It is invalid for the renew to use the same nonce - // as was used for last issue/renew. It doesn't matter when policy is none. - if secure_channel.security_policy() != SecurityPolicy::None - && request.client_nonce.as_ref() == secure_channel.remote_nonce() - { - error!("Client reused a nonce for a renew"); - return Ok(ServiceFault::new( - &request.request_header, - StatusCode::BadNonceInvalid, - ) - .into()); - } - - // check to see if the secure channel has been issued before or not - if !self.secure_channel_state.issued { - error!("Asked to renew token on session that has never issued token"); - return Err(StatusCode::BadUnexpectedError); - } - self.secure_channel_state.renew_count += 1; - secure_channel.secure_channel_id() - } - }; - - // Check the requested security mode - debug!("Message security mode == {:?}", request.security_mode); - match request.security_mode { - MessageSecurityMode::None - | MessageSecurityMode::Sign - | MessageSecurityMode::SignAndEncrypt => { - // TODO validate NONCE - } - _ => { - error!("Security mode is invalid"); - return Ok(ServiceFault::new( - &request.request_header, - StatusCode::BadSecurityModeRejected, - ) - .into()); - } - } - - // Process the request - self.secure_channel_state.issued = true; - - // Create a new secure channel info - let security_mode = request.security_mode; - secure_channel.set_security_mode(security_mode); - secure_channel.set_token_id(self.secure_channel_state.create_token_id()); - secure_channel.set_secure_channel_id(secure_channel_id); - secure_channel.set_remote_cert_from_byte_string(&security_header.sender_certificate)?; - - match secure_channel.set_remote_nonce_from_byte_string(&request.client_nonce) { - Ok(_) => secure_channel.create_random_nonce(), - Err(err) => { - error!("Was unable to set their nonce, check 
logic"); - return Ok(ServiceFault::new(&request.request_header, err).into()); - } - } - - let security_policy = secure_channel.security_policy(); - if security_policy != SecurityPolicy::None - && (security_mode == MessageSecurityMode::Sign - || security_mode == MessageSecurityMode::SignAndEncrypt) - { - secure_channel.derive_keys(); - } - - let response = OpenSecureChannelResponse { - response_header: ResponseHeader::new_good(&request.request_header), - server_protocol_version: 0, - security_token: ChannelSecurityToken { - channel_id: secure_channel.secure_channel_id(), - token_id: secure_channel.token_id(), - created_at: DateTime::now(), - revised_lifetime: request.requested_lifetime, - }, - server_nonce: secure_channel.local_nonce_as_byte_string(), - }; - Ok(response.into()) - } - - pub fn close_secure_channel( - &mut self, - _: &SupportedMessage, - ) -> Result { - info!("CloseSecureChannelRequest received, session closing"); - Err(StatusCode::BadConnectionClosed) - } -} diff --git a/lib/src/server/comms/tcp_transport.rs b/lib/src/server/comms/tcp_transport.rs deleted file mode 100644 index d5b40f5b1..000000000 --- a/lib/src/server/comms/tcp_transport.rs +++ /dev/null @@ -1,683 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! The TCP transport module handles receiving and sending of binary data in chunks, handshake, -//! session creation and dispatching of messages via message handler. -//! -//! Internally it uses tokio but the facade is mostly synchronous with the exception of publish -//! responses. i.e. the client is expected to call and wait for a response to their request. -//! Publish requests are sent based on the number of subscriptions and the responses / handling are -//! left to asynchronous event handlers. 
-use chrono::{self, Utc}; -use futures::StreamExt; -use std::{net::SocketAddr, sync::Arc}; -use tokio::{ - self, - io::AsyncWriteExt, - net::{ - tcp::{OwnedReadHalf, OwnedWriteHalf}, - TcpStream, - }, - sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, - time::{interval_at, Duration, Instant}, -}; - -use tokio::time::timeout; -use tokio_util::codec::FramedRead; - -use crate::core::{ - comms::{ - message_writer::MessageWriter, - secure_channel::SecureChannel, - tcp_codec::{self, TcpCodec}, - }, - prelude::*, -}; -use crate::crypto::CertificateStore; -use crate::sync::*; -use crate::types::status_code::StatusCode; - -use crate::server::{ - address_space::types::AddressSpace, - comms::{secure_channel_service::SecureChannelService, transport::*}, - services::message_handler::MessageHandler, - session::SessionManager, - state::ServerState, - subscriptions::subscription::TickReason, -}; - -/// Messages that may be sent to the writer. -#[derive(Debug)] -enum Message { - // Message for writer to quit right now. 
- Quit, - // A supported message with a request id - Message(u32, SupportedMessage), -} - -#[derive(Clone)] -pub struct MessageSender { - sender: UnboundedSender, -} - -impl MessageSender { - pub fn send_quit(&self) { - let _ = self.sender.send(Message::Quit); - } - - pub fn send_message(&self, request_id: u32, message: SupportedMessage) { - let _ = self.sender.send(Message::Message(request_id, message)); - } -} - -struct ReadState { - /// The associated connection - pub transport: Arc>, - /// Sender of responses - pub sender: UnboundedSender, - /// Time to wait for a HELLO from the client - pub hello_timeout: u32, - /// Reader from which messages will be decoded - pub reader: OwnedReadHalf, -} - -struct WriteState { - /// The associated connection - pub transport: Arc>, - /// Secure channel state - pub secure_channel: Arc>, - /// Writing portion of socket - pub writer: OwnedWriteHalf, - /// Write buffer (protected since it might be accessed by publish response / event activity) - pub send_buffer: Arc>, -} - -/// This is the thing that handles input and output for the open connection associated with the -/// session. -pub struct TcpTransport { - /// Server state, address space etc. - server_state: Arc>, - /// Transport id (for debugging) - transport_id: NodeId, - /// Secure channel state - secure_channel: Arc>, - /// Address space - address_space: Arc>, - /// The current transport state - transport_state: TransportState, - /// Client address - client_address: Option, - /// Secure channel handler - secure_channel_service: SecureChannelService, - /// Message handler - message_handler: MessageHandler, - /// Client protocol version set during HELLO - client_protocol_version: u32, - /// Last decoded sequence number - last_received_sequence_number: u32, - /// A message may consist of one or more chunks which are stored here until complete. - pending_chunks: Vec, - /// Sessions associated with this connection. 
Normally there would be one, but potentially there could be more - session_manager: Arc>, -} - -impl Transport for TcpTransport { - fn state(&self) -> TransportState { - self.transport_state - } - - // Terminates the connection and the session - fn finish(&mut self, status_code: StatusCode) { - if !self.is_finished() { - debug!( - "Transport is being placed in finished state, code {}", - status_code - ); - self.transport_state = TransportState::Finished(status_code); - // Clear sessions - let mut session_manager = trace_write_lock!(self.session_manager); - session_manager.clear(self.address_space.clone()); - } else { - trace!("Transport is being placed in finished state when it is already finished, ignoring code {}", status_code); - } - } - - fn client_address(&self) -> Option { - self.client_address - } - - fn session_manager(&self) -> Arc> { - self.session_manager.clone() - } -} - -impl TcpTransport { - pub fn new( - certificate_store: Arc>, - server_state: Arc>, - address_space: Arc>, - session_manager: Arc>, - ) -> TcpTransport { - let decoding_options = { - let server_state = trace_read_lock!(server_state); - let config = trace_read_lock!(server_state.config); - config.decoding_options() - }; - let secure_channel = Arc::new(RwLock::new(SecureChannel::new( - certificate_store.clone(), - Role::Server, - decoding_options, - ))); - - let message_handler = MessageHandler::new( - secure_channel.clone(), - certificate_store, - server_state.clone(), - session_manager.clone(), - address_space.clone(), - ); - - let secure_channel_service = SecureChannelService::new(); - let transport_id = NodeId::next_numeric(0); - - TcpTransport { - server_state, - transport_id, - address_space, - transport_state: TransportState::New, - client_address: None, - message_handler, - secure_channel, - secure_channel_service, - client_protocol_version: 0, - last_received_sequence_number: 0, - pending_chunks: Vec::with_capacity(2), - session_manager, - } - } - - /// This is the entry point 
for the session. This function is asynchronous - it spawns tokio - /// tasks to handle the session execution loop so this function will returns immediately. - pub fn run(connection: Arc>, socket: TcpStream, looping_interval_ms: f64) { - info!( - "Socket info:\n Linger - {},\n TTL - {}", - if let Ok(v) = socket.linger() { - match v { - Some(d) => format!("{}ms", d.as_millis()), - None => "No linger".to_string(), - } - } else { - "No Linger (err)".to_string() - }, - if let Ok(v) = socket.ttl() { - format!("{}", v) - } else { - "No TTL".to_string() - } - ); - - // Store the address of the client - let (send_buffer_size, receive_buffer_size) = { - let mut connection = trace_write_lock!(connection); - connection.client_address = Some(socket.peer_addr().unwrap()); - connection.transport_state = TransportState::WaitingHello; - let server_state = trace_read_lock!(connection.server_state); - ( - server_state.send_buffer_size, - server_state.receive_buffer_size, - ) - }; - - // Spawn the tasks we need to run - tokio::spawn(Self::spawn_session_handler_task( - connection, - socket, - looping_interval_ms, - send_buffer_size, - receive_buffer_size, - )); - } - - async fn write_bytes_task(mut write_state: WriteState) -> WriteState { - let bytes_to_write = { - let mut send_buffer = trace_lock!(write_state.send_buffer); - send_buffer.bytes_to_write() - }; - let result = write_state.writer.write_all(&bytes_to_write).await; - if let Err(err) = result { - error!("Write IO error {:?}", err); - let mut transport = trace_write_lock!(write_state.transport); - transport.finish(StatusCode::BadCommunicationError); - } - write_state - } - - async fn spawn_session_handler_task( - transport: Arc>, - socket: TcpStream, - looping_interval_ms: f64, - send_buffer_size: usize, - receive_buffer_size: usize, - ) { - // The reader task will send responses, the writer task will receive responses - let (tx, rx) = unbounded_channel(); - let send_buffer = 
Arc::new(Mutex::new(MessageWriter::new(send_buffer_size, 0, 0))); - - let (reader, writer) = socket.into_split(); - let (hello_timeout, secure_channel) = { - let transport = trace_read_lock!(transport); - let server_state = trace_read_lock!(transport.server_state); - let server_config = trace_read_lock!(server_state.config); - info!( - "Session transport {} started at {}", - transport.transport_id, - Utc::now() - ); - ( - server_config.tcp_config.hello_timeout, - transport.secure_channel.clone(), - ) - }; - - let read_state = ReadState { - reader, - hello_timeout, - transport: transport.clone(), - sender: tx.clone(), - }; - - // Spawn all the tasks that monitor the session - the subscriptions, finished state, - // reading and writing. - let final_status = tokio::select! { - _ = Self::spawn_subscriptions_task(transport.clone(), tx.clone(), looping_interval_ms) => { - log::trace!("Closing connection because the subscription task failed"); - Ok(()) - } - status = Self::spawn_writing_loop_task(writer, rx, secure_channel, transport.clone(), send_buffer) => { - log::trace!("Closing connection after the write task ended"); - status - } - status = Self::spawn_reading_loop_task(read_state, send_buffer_size, receive_buffer_size) => { - log::trace!("Closing connection after the read task ended"); - status - } - }.err().unwrap_or(StatusCode::Good); - - log::info!("Closing connection with status {}", final_status); - // Both the read and write halves of the tcp stream are dropped at this point, - // and the connection is closed - let mut transport = trace_write_lock!(transport); - transport.finish(final_status); - } - - /// Spawns the writing loop task. The writing loop takes messages to send off of a queue - /// and sends them to the stream. 
- async fn spawn_writing_loop_task( - writer: OwnedWriteHalf, - mut receiver: UnboundedReceiver, - secure_channel: Arc>, - transport: Arc>, - send_buffer: Arc>, - ) -> Result<(), StatusCode> { - let mut write_state = WriteState { - transport: transport.clone(), - writer, - send_buffer, - secure_channel, - }; - - // The writing task waits for messages that are to be sent - while let Some(message) = receiver.recv().await { - trace!("Writing loop received message: {:?}", message); - let (request_id, response) = match message { - Message::Quit => { - debug!("Server writer received a quit so it will quit"); - return Ok(()); - } - Message::Message(request_id, response) => { - if let SupportedMessage::Invalid(_) = response { - error!("Writer terminating - received an invalid message"); - return Err(StatusCode::BadCommunicationError); - } - (request_id, response) - } - }; - - { - let secure_channel = trace_read_lock!(write_state.secure_channel); - let mut send_buffer = trace_lock!(write_state.send_buffer); - match response { - SupportedMessage::AcknowledgeMessage(ack) => { - send_buffer.write_ack(&ack)?; - } - msg => { - send_buffer.write(request_id, msg, &secure_channel)?; - } - } - } - write_state = Self::write_bytes_task(write_state).await; - } - Ok(()) - } - - async fn wait_for_hello( - reader: &mut FramedRead, - hello_timeout: u32, - ) -> Result { - let duration = Duration::from_secs(u64::from(hello_timeout)); - match timeout(duration, reader.next()).await { - // We process a timeout(stream_element(tcp_message)) - Err(_timeout) => { - warn!("Session has been waiting for a hello for more than the timeout period and will now close"); - Err(StatusCode::BadTimeout) - } - Ok(Some(Ok(tcp_codec::Message::Hello(hello)))) => Ok(hello), - Ok(Some(Ok(bad_msg))) => { - log::error!("Expected a hello message, got {:?} instead", bad_msg); - Err(StatusCode::BadCommunicationError) - } - Ok(Some(Err(communication_err))) => { - error!( - "Communication error while waiting for Hello 
message: {}", - communication_err - ); - Err(StatusCode::BadCommunicationError) - } - Ok(None) => Err(StatusCode::BadConnectionClosed), - } - } - - /// Spawns the reading loop where a reader task continuously reads messages, chunks from the - /// input and process them. The reading task will terminate upon error. - async fn spawn_reading_loop_task( - read_state: ReadState, - send_buffer_size: usize, - receive_buffer_size: usize, - ) -> Result<(), StatusCode> { - let (transport, mut sender) = { (read_state.transport.clone(), read_state.sender.clone()) }; - - let decoding_options = { - let transport = trace_read_lock!(transport); - let secure_channel = trace_read_lock!(transport.secure_channel); - secure_channel.decoding_options() - }; - - // The reader reads frames from the codec, which are messages - let mut framed_read = - FramedRead::new(read_state.reader, TcpCodec::new(decoding_options.clone())); - - let hello = Self::wait_for_hello(&mut framed_read, read_state.hello_timeout).await?; - trace_write_lock!(transport).process_hello( - hello, - &mut sender, - &decoding_options, - send_buffer_size, - receive_buffer_size, - )?; - - while let Some(next_msg) = framed_read.next().await { - match next_msg { - Ok(tcp_codec::Message::Chunk(chunk)) => { - log::trace!("Received message chunk: {:?}", chunk); - let mut transport = trace_write_lock!(transport); - transport.process_chunk(chunk, &mut sender)? 
- } - Ok(unexpected) => { - log::error!("Received unexpected message: {:?}", unexpected); - return Err(StatusCode::BadCommunicationError); - } - Err(err) => { - error!("Server reader error {:?}", err); - return Err(StatusCode::BadCommunicationError); - } - } - } - Ok(()) - } - - /// Start the subscription timer to service subscriptions - async fn spawn_subscriptions_task( - transport: Arc>, - sender: UnboundedSender, - looping_interval_ms: f64, - ) -> Result<(), StatusCode> { - // Subscription events are passed sent from the monitor task to the receiver - debug!("Starting subscription timer loop"); - - // Create the monitoring timer - this monitors for publish requests and ticks the subscriptions - let interval_duration = Duration::from_millis(looping_interval_ms as u64); - - // Creates a repeating interval future that checks subscriptions. - let mut timer = interval_at(Instant::now(), interval_duration); - - loop { - timer.tick().await; - - let transport = trace_read_lock!(transport); - let session_manager = trace_read_lock!(transport.session_manager); - - for (_node_id, session) in session_manager.sessions.iter() { - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(transport.address_space); - let now = Utc::now(); - - // Request queue might contain stale publish requests - session.expire_stale_publish_requests(&now); - - // Process subscriptions - session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired)?; - - // Check if there are publish responses to send for transmission - if let Some(publish_responses) = - session.subscriptions_mut().take_publish_responses() - { - for publish_response in publish_responses { - trace!( - "<-- Sending a Publish Response{}, {:?}", - publish_response.request_id, - &publish_response.response - ); - // Messages will be sent by the writing task - sender - .send(Message::Message( - publish_response.request_id, - publish_response.response, - )) - .map_err(|e| { - error!("Unable to 
send publish response to writer task: {}", e); - StatusCode::BadUnexpectedError - })?; - } - } - } - } - } - - /// Test if the connection should abort - pub fn is_server_abort(&self) -> bool { - let server_state = trace_read_lock!(self.server_state); - server_state.is_abort() - } - - fn process_hello( - &mut self, - hello: HelloMessage, - sender: &mut UnboundedSender, - decoding_options: &DecodingOptions, - send_buffer_size: usize, - receive_buffer_size: usize, - ) -> std::result::Result<(), StatusCode> { - let server_protocol_version = 0; - let endpoints = { - let server_state = trace_read_lock!(self.server_state); - server_state.endpoints(&hello.endpoint_url, &None) - } - .unwrap(); - - trace!("Server received HELLO {:?}", hello); - if !hello.is_endpoint_url_valid(&endpoints) { - error!("HELLO endpoint url is invalid"); - return Err(StatusCode::BadTcpEndpointUrlInvalid); - } - if !hello.is_valid_buffer_sizes() { - error!("HELLO buffer sizes are invalid"); - return Err(StatusCode::BadCommunicationError); - } - - // Validate protocol version - if hello.protocol_version > server_protocol_version { - return Err(StatusCode::BadProtocolVersionUnsupported); - } - - let client_protocol_version = hello.protocol_version; - - // Send acknowledge - let mut acknowledge = AcknowledgeMessage { - message_header: MessageHeader::new(MessageType::Acknowledge), - protocol_version: server_protocol_version, - receive_buffer_size: receive_buffer_size as u32, - send_buffer_size: send_buffer_size as u32, - max_message_size: decoding_options.max_message_size as u32, - max_chunk_count: decoding_options.max_chunk_count as u32, - }; - acknowledge.message_header.message_size = acknowledge.byte_len() as u32; - let acknowledge: SupportedMessage = acknowledge.into(); - - // New state - self.transport_state = TransportState::ProcessMessages; - self.client_protocol_version = client_protocol_version; - - debug!("Sending ACK"); - let _ = sender.send(Message::Message(0, acknowledge)); - Ok(()) - } - 
- fn turn_received_chunks_into_message( - &mut self, - chunks: &[MessageChunk], - ) -> std::result::Result { - // Validate that all chunks have incrementing sequence numbers and valid chunk types - let secure_channel = trace_read_lock!(self.secure_channel); - self.last_received_sequence_number = Chunker::validate_chunks( - self.last_received_sequence_number + 1, - &secure_channel, - chunks, - )?; - // Now decode - Chunker::decode(chunks, &secure_channel, None) - } - - fn process_chunk( - &mut self, - chunk: MessageChunk, - sender: &mut UnboundedSender, - ) -> std::result::Result<(), StatusCode> { - let decoding_options = { - let secure_channel = trace_read_lock!(self.secure_channel); - secure_channel.decoding_options() - }; - - let message_header = chunk.message_header(&decoding_options)?; - - if message_header.is_final == MessageIsFinalType::FinalError { - info!("Discarding chunks as after receiving one marked as final error"); - self.pending_chunks.clear(); - Ok(()) - } else { - // Decrypt / verify chunk if necessary - let chunk = { - let mut secure_channel = trace_write_lock!(self.secure_channel); - secure_channel.verify_and_remove_security(&chunk.data)? - }; - - // TODO check how many chunks are pending, produce error and drop connection if it exceeds - // supported chunk limit - - // Put the chunk on the list - self.pending_chunks.push(chunk); - - // The final chunk will trigger turning all pending chunks into a request - if message_header.is_final == MessageIsFinalType::Final { - self.process_final_chunk(&message_header, sender) - } else { - Ok(()) - } - } - } - - fn process_final_chunk( - &mut self, - message_header: &MessageChunkHeader, - sender: &mut UnboundedSender, - ) -> Result<(), StatusCode> { - // Drain pending chunks and turn them into a message - let chunks: Vec = self.pending_chunks.drain(..).collect(); - let chunk_info = { - let secure_channel = trace_read_lock!(self.secure_channel); - chunks[0].chunk_info(&secure_channel)? 
- }; - - // Handle the request, and then send the response back to the caller - let request = self.turn_received_chunks_into_message(&chunks)?; - let request_id = chunk_info.sequence_header.request_id; - - let sender = MessageSender { - sender: sender.clone(), - }; - - match message_header.message_type { - MessageChunkType::OpenSecureChannel => self.process_open_secure_channel( - request_id, - &request, - &chunk_info.security_header, - &sender, - ), - MessageChunkType::CloseSecureChannel => { - self.process_close_secure_channel(request_id, &request, &sender) - } - MessageChunkType::Message => self.process_message(request_id, &request, &sender), - } - } - - fn process_open_secure_channel( - &mut self, - request_id: u32, - request: &SupportedMessage, - security_header: &SecurityHeader, - sender: &MessageSender, - ) -> Result<(), StatusCode> { - let mut secure_channel = trace_write_lock!(self.secure_channel); - let response = self.secure_channel_service.open_secure_channel( - &mut secure_channel, - security_header, - self.client_protocol_version, - request, - )?; - let _ = sender.send_message(request_id, response); - Ok(()) - } - - fn process_close_secure_channel( - &mut self, - request_id: u32, - request: &SupportedMessage, - sender: &MessageSender, - ) -> Result<(), StatusCode> { - let response = self.secure_channel_service.close_secure_channel(request)?; - let _ = sender.send_message(request_id, response); - Ok(()) - } - - fn process_message( - &mut self, - request_id: u32, - request: &SupportedMessage, - sender: &MessageSender, - ) -> Result<(), StatusCode> { - let _ = self - .message_handler - .handle_message(request_id, request, sender)?; - Ok(()) - } -} diff --git a/lib/src/server/comms/transport.rs b/lib/src/server/comms/transport.rs deleted file mode 100644 index 540bbca3f..000000000 --- a/lib/src/server/comms/transport.rs +++ /dev/null @@ -1,46 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! 
Defines the traits and other agnostic properties that all OPC UA transports will share. -//! Provides a level of abstraction for the server to call through when it doesn't require specific -//! knowledge of the transport it is using. - -use std::{net::SocketAddr, sync::Arc}; - -use crate::sync::*; -use crate::types::status_code::StatusCode; - -use crate::server::session::SessionManager; - -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum TransportState { - New, - WaitingHello, - ProcessMessages, - Finished(StatusCode), -} - -/// Represents a transport layer, the thing responsible for maintaining an open channel and transferring -/// data between the server and the client. -pub trait Transport { - // Get the current state of the transport - fn state(&self) -> TransportState; - // Test if the transport has received its HELLO - fn has_received_hello(&self) -> bool { - !matches!( - self.state(), - TransportState::New | TransportState::WaitingHello - ) - } - /// Terminate the session and put the connection in a finished state - fn finish(&mut self, status_code: StatusCode); - // Test if the transport is finished - fn is_finished(&self) -> bool { - matches!(self.state(), TransportState::Finished(_)) - } - /// Returns the address of the client (peer) of this connection - fn client_address(&self) -> Option; - /// Returns the session map for the connection - fn session_manager(&self) -> Arc>; -} diff --git a/lib/src/server/config.rs b/lib/src/server/config.rs deleted file mode 100644 index 989f463fc..000000000 --- a/lib/src/server/config.rs +++ /dev/null @@ -1,794 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides configuration settings for the server including serialization and deserialization from file. 
-use std::{ - collections::{BTreeMap, BTreeSet}, - path::{Path, PathBuf}, - str::FromStr, -}; - -use crate::{ - core::{comms::url::url_matches_except_host, config::Config}, - crypto::{CertificateStore, SecurityPolicy, Thumbprint}, - types::{service_types::ApplicationType, DecodingOptions, MessageSecurityMode, UAString}, -}; - -use super::constants; - -pub const ANONYMOUS_USER_TOKEN_ID: &str = "ANONYMOUS"; - -const RECEIVE_BUFFER_SIZE: usize = u16::MAX as usize; -const SEND_BUFFER_SIZE: usize = u16::MAX as usize; - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub struct TcpConfig { - /// Timeout for hello on a session in seconds - pub hello_timeout: u32, - /// The hostname to supply in the endpoints - pub host: String, - /// The port number of the service - pub port: u16, -} - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub struct ServerUserToken { - /// User name - pub user: String, - /// Password - #[serde(skip_serializing_if = "Option::is_none")] - pub pass: Option, - // X509 file path (as a string) - #[serde(skip_serializing_if = "Option::is_none")] - pub x509: Option, - #[serde(skip)] - pub thumbprint: Option, -} - -impl ServerUserToken { - /// Create a user pass token - pub fn user_pass(user: T, pass: T) -> Self - where - T: Into, - { - ServerUserToken { - user: user.into(), - pass: Some(pass.into()), - x509: None, - thumbprint: None, - } - } - - /// Create an X509 token. - pub fn x509(user: T, cert_path: &Path) -> Self - where - T: Into, - { - ServerUserToken { - user: user.into(), - pass: None, - x509: Some(cert_path.to_string_lossy().to_string()), - thumbprint: None, - } - } - - /// Read an X509 user token's certificate from disk and then hold onto the thumbprint for it. - pub fn read_thumbprint(&mut self) { - if self.is_x509() && self.thumbprint.is_none() { - // As part of validation, we're going to try and load the x509 certificate from disk, and - // obtain its thumbprint. This will be used when a session is activated. 
- if let Some(ref x509_path) = self.x509 { - let path = PathBuf::from(x509_path); - if let Ok(x509) = CertificateStore::read_cert(&path) { - self.thumbprint = Some(x509.thumbprint()); - } - } - } - } - - /// Test if the token is valid. This does not care for x509 tokens if the cert is present on - /// the disk or not. - pub fn is_valid(&self, id: &str) -> bool { - let mut valid = true; - if id == ANONYMOUS_USER_TOKEN_ID { - error!( - "User token {} is invalid because id is a reserved value, use another value.", - id - ); - valid = false; - } - if self.user.is_empty() { - error!("User token {} has an empty user name.", id); - valid = false; - } - if self.pass.is_some() && self.x509.is_some() { - error!( - "User token {} holds a password and certificate info - it cannot be both.", - id - ); - valid = false; - } else if self.pass.is_none() && self.x509.is_none() { - error!( - "User token {} fails to provide a password or certificate info.", - id - ); - valid = false; - } - valid - } - - pub fn is_user_pass(&self) -> bool { - self.x509.is_none() - } - - pub fn is_x509(&self) -> bool { - self.x509.is_some() - } -} - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub struct Limits { - /// Indicates if clients are able to modify the address space through the node management service - /// set. This is a very broad flag and is likely to require more fine grained per user control - /// in a later revision. 
By default, this value is `false` - pub clients_can_modify_address_space: bool, - /// Maximum number of subscriptions in a session, 0 for no limit - pub max_subscriptions: usize, - /// Maximum number of monitored items per subscription, 0 for no limit - pub max_monitored_items_per_sub: usize, - /// Maximum number of values in a monitored item queue - pub max_monitored_item_queue_size: usize, - /// Max array length in elements - pub max_array_length: usize, - /// Max string length in characters - pub max_string_length: usize, - /// Max bytestring length in bytes - pub max_byte_string_length: usize, - /// Specifies the minimum sampling interval for this server in seconds. - pub min_sampling_interval: f64, - /// Specifies the minimum publishing interval for this server in seconds. - pub min_publishing_interval: f64, - /// Maximum message length in bytes - pub max_message_size: usize, - /// Maximum chunk count - pub max_chunk_count: usize, - /// Send buffer size in bytes - pub send_buffer_size: usize, - /// Receive buffer size in bytes - pub receive_buffer_size: usize, -} - -impl Default for Limits { - fn default() -> Self { - let decoding_options = DecodingOptions::default(); - Self { - max_array_length: decoding_options.max_array_length, - max_string_length: decoding_options.max_string_length, - max_byte_string_length: decoding_options.max_byte_string_length, - max_subscriptions: constants::DEFAULT_MAX_SUBSCRIPTIONS, - max_monitored_items_per_sub: constants::DEFAULT_MAX_MONITORED_ITEMS_PER_SUB, - max_monitored_item_queue_size: constants::MAX_DATA_CHANGE_QUEUE_SIZE, - max_message_size: decoding_options.max_message_size, - max_chunk_count: decoding_options.max_chunk_count, - clients_can_modify_address_space: false, - min_sampling_interval: constants::MIN_SAMPLING_INTERVAL, - min_publishing_interval: constants::MIN_PUBLISHING_INTERVAL, - send_buffer_size: SEND_BUFFER_SIZE, - receive_buffer_size: RECEIVE_BUFFER_SIZE, - } - } -} - -#[derive(Debug, PartialEq, Serialize, 
Deserialize, Clone)] -pub struct CertificateValidation { - /// Auto trusts client certificates. For testing/samples only unless you're sure what you're - /// doing. - pub trust_client_certs: bool, - /// Check the valid from/to fields of a certificate - pub check_time: bool, -} - -impl Default for CertificateValidation { - fn default() -> Self { - Self { - trust_client_certs: false, - check_time: true, - } - } -} - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub struct ServerEndpoint { - /// Endpoint path - pub path: String, - /// Security policy - pub security_policy: String, - /// Security mode - pub security_mode: String, - /// Security level, higher being more secure - pub security_level: u8, - /// Password security policy when a client supplies a user name identity token - pub password_security_policy: Option, - /// User tokens - pub user_token_ids: BTreeSet, -} - -/// Convenience method to make an endpoint from a tuple -impl<'a> From<(&'a str, SecurityPolicy, MessageSecurityMode, &'a [&'a str])> for ServerEndpoint { - fn from(v: (&'a str, SecurityPolicy, MessageSecurityMode, &'a [&'a str])) -> ServerEndpoint { - ServerEndpoint { - path: v.0.into(), - security_policy: v.1.to_string(), - security_mode: v.2.to_string(), - security_level: Self::security_level(v.1, v.2), - password_security_policy: None, - user_token_ids: v.3.iter().map(|id| id.to_string()).collect(), - } - } -} - -impl ServerEndpoint { - pub fn new( - path: T, - security_policy: SecurityPolicy, - security_mode: MessageSecurityMode, - user_token_ids: &[String], - ) -> Self - where - T: Into, - { - ServerEndpoint { - path: path.into(), - security_policy: security_policy.to_string(), - security_mode: security_mode.to_string(), - security_level: Self::security_level(security_policy, security_mode), - password_security_policy: None, - user_token_ids: user_token_ids.iter().cloned().collect(), - } - } - - /// Recommends a security level for the supplied security policy - fn 
security_level(security_policy: SecurityPolicy, security_mode: MessageSecurityMode) -> u8 { - let security_level = match security_policy { - SecurityPolicy::Basic128Rsa15 => 1, - SecurityPolicy::Aes128Sha256RsaOaep => 2, - SecurityPolicy::Basic256 => 3, - SecurityPolicy::Basic256Sha256 => 4, - SecurityPolicy::Aes256Sha256RsaPss => 5, - _ => 0, - }; - if security_mode == MessageSecurityMode::SignAndEncrypt { - security_level + 10 - } else { - security_level - } - } - - pub fn new_none(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::None, - MessageSecurityMode::None, - user_token_ids, - ) - } - - pub fn new_basic128rsa15_sign(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Basic128Rsa15, - MessageSecurityMode::Sign, - user_token_ids, - ) - } - - pub fn new_basic128rsa15_sign_encrypt(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Basic128Rsa15, - MessageSecurityMode::SignAndEncrypt, - user_token_ids, - ) - } - - pub fn new_basic256_sign(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Basic256, - MessageSecurityMode::Sign, - user_token_ids, - ) - } - - pub fn new_basic256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Basic256, - MessageSecurityMode::SignAndEncrypt, - user_token_ids, - ) - } - - pub fn new_basic256sha256_sign(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::Sign, - user_token_ids, - ) - } - - pub fn new_basic256sha256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - user_token_ids, - ) - } - - pub fn 
new_aes128_sha256_rsaoaep_sign(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Aes128Sha256RsaOaep, - MessageSecurityMode::Sign, - user_token_ids, - ) - } - - pub fn new_aes128_sha256_rsaoaep_sign_encrypt(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Aes128Sha256RsaOaep, - MessageSecurityMode::SignAndEncrypt, - user_token_ids, - ) - } - - pub fn new_aes256_sha256_rsapss_sign(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Aes256Sha256RsaPss, - MessageSecurityMode::Sign, - user_token_ids, - ) - } - - pub fn new_aes256_sha256_rsapss_sign_encrypt(path: T, user_token_ids: &[String]) -> Self - where - T: Into, - { - Self::new( - path, - SecurityPolicy::Aes256Sha256RsaPss, - MessageSecurityMode::SignAndEncrypt, - user_token_ids, - ) - } - - pub fn is_valid(&self, id: &str, user_tokens: &BTreeMap) -> bool { - let mut valid = true; - - // Validate that the user token ids exist - for id in &self.user_token_ids { - // Skip anonymous - if id == ANONYMOUS_USER_TOKEN_ID { - continue; - } - if !user_tokens.contains_key(id) { - error!("Cannot find user token with id {}", id); - valid = false; - } - } - - if let Some(ref password_security_policy) = self.password_security_policy { - let password_security_policy = - SecurityPolicy::from_str(password_security_policy).unwrap(); - if password_security_policy == SecurityPolicy::Unknown { - error!("Endpoint {} is invalid. Password security policy \"{}\" is invalid. 
Valid values are None, Basic128Rsa15, Basic256, Basic256Sha256", id, password_security_policy); - valid = false; - } - } - - // Validate the security policy and mode - let security_policy = SecurityPolicy::from_str(&self.security_policy).unwrap(); - let security_mode = MessageSecurityMode::from(self.security_mode.as_ref()); - if security_policy == SecurityPolicy::Unknown { - error!("Endpoint {} is invalid. Security policy \"{}\" is invalid. Valid values are None, Basic128Rsa15, Basic256, Basic256Sha256, Aes128Sha256RsaOaep, Aes256Sha256RsaPss,", id, self.security_policy); - valid = false; - } else if security_mode == MessageSecurityMode::Invalid { - error!("Endpoint {} is invalid. Security mode \"{}\" is invalid. Valid values are None, Sign, SignAndEncrypt", id, self.security_mode); - valid = false; - } else if (security_policy == SecurityPolicy::None - && security_mode != MessageSecurityMode::None) - || (security_policy != SecurityPolicy::None - && security_mode == MessageSecurityMode::None) - { - error!("Endpoint {} is invalid. Security policy and security mode must both contain None or neither of them should (1).", id); - valid = false; - } else if security_policy != SecurityPolicy::None - && security_mode == MessageSecurityMode::None - { - error!("Endpoint {} is invalid. Security policy and security mode must both contain None or neither of them should (2).", id); - valid = false; - } - valid - } - - pub fn security_policy(&self) -> SecurityPolicy { - SecurityPolicy::from_str(&self.security_policy).unwrap() - } - - pub fn message_security_mode(&self) -> MessageSecurityMode { - MessageSecurityMode::from(self.security_mode.as_ref()) - } - - pub fn endpoint_url(&self, base_endpoint: &str) -> String { - format!("{}{}", base_endpoint, self.path) - } - - /// Returns the effective password security policy for the endpoint. This is the explicitly set password - /// security policy, or just the regular security policy. 
- pub fn password_security_policy(&self) -> SecurityPolicy { - let mut password_security_policy = self.security_policy(); - if let Some(ref security_policy) = self.password_security_policy { - match SecurityPolicy::from_str(security_policy).unwrap() { - SecurityPolicy::Unknown => { - panic!( - "Password security policy {} is unrecognized", - security_policy - ); - } - security_policy => { - password_security_policy = security_policy; - } - } - } - password_security_policy - } - - /// Test if the endpoint supports anonymous users - pub fn supports_anonymous(&self) -> bool { - self.supports_user_token_id(ANONYMOUS_USER_TOKEN_ID) - } - - /// Tests if this endpoint supports user pass tokens. It does this by looking to see - /// if any of the users allowed to access this endpoint are user pass users. - pub fn supports_user_pass(&self, server_tokens: &BTreeMap) -> bool { - for user_token_id in &self.user_token_ids { - if user_token_id != ANONYMOUS_USER_TOKEN_ID { - if let Some(user_token) = server_tokens.get(user_token_id) { - if user_token.is_user_pass() { - return true; - } - } - } - } - false - } - - /// Tests if this endpoint supports x509 tokens. It does this by looking to see - /// if any of the users allowed to access this endpoint are x509 users. - pub fn supports_x509(&self, server_tokens: &BTreeMap) -> bool { - for user_token_id in &self.user_token_ids { - if user_token_id != ANONYMOUS_USER_TOKEN_ID { - if let Some(user_token) = server_tokens.get(user_token_id) { - if user_token.is_x509() { - return true; - } - } - } - } - false - } - - pub fn supports_user_token_id(&self, id: &str) -> bool { - self.user_token_ids.contains(id) - } -} - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub struct Performance { - /// Use a single-threaded executor. The default executor uses a thread pool with a worker - /// thread for each CPU core available on the system. 
- pub single_threaded_executor: bool, -} - -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] -pub struct ServerConfig { - /// An id for this server - pub application_name: String, - /// A description for this server - pub application_uri: String, - /// Product url - pub product_uri: String, - /// Autocreates public / private keypair if they don't exist. For testing/samples only - /// since you do not have control of the values - pub create_sample_keypair: bool, - /// Path to a custom certificate, to be used instead of the default .der certificate - pub certificate_path: Option, - /// Path to a custom private key, to be used instead of the default private key - pub private_key_path: Option, - /// Checks the certificate's time validity - pub certificate_validation: CertificateValidation, - /// PKI folder, either absolute or relative to executable - pub pki_dir: PathBuf, - /// Url to a discovery server - adding this string causes the server to assume you wish to - /// register the server with a discovery server. - pub discovery_server_url: Option, - /// tcp configuration information - pub tcp_config: TcpConfig, - /// Server OPA UA limits - pub limits: Limits, - /// Server Performance - pub performance: Performance, - /// Supported locale ids - pub locale_ids: Vec, - /// User tokens - pub user_tokens: BTreeMap, - /// discovery endpoint url which may or may not be the same as the service endpoints below. - pub discovery_urls: Vec, - /// Default endpoint id - pub default_endpoint: Option, - /// Endpoints supported by the server - pub endpoints: BTreeMap, -} - -impl Config for ServerConfig { - fn is_valid(&self) -> bool { - let mut valid = true; - if self.application_name.is_empty() { - warn!("No application was set"); - } - if self.application_uri.is_empty() { - warn!("No application uri was set"); - } - if self.product_uri.is_empty() { - warn!("No product uri was set"); - } - if self.endpoints.is_empty() { - error!("Server configuration is invalid. 
It defines no endpoints"); - valid = false; - } - for (id, endpoint) in &self.endpoints { - if !endpoint.is_valid(id, &self.user_tokens) { - valid = false; - } - } - if let Some(ref default_endpoint) = self.default_endpoint { - if !self.endpoints.contains_key(default_endpoint) { - valid = false; - } - } - for (id, user_token) in &self.user_tokens { - if !user_token.is_valid(id) { - valid = false; - } - } - if self.limits.max_array_length == 0 { - error!("Server configuration is invalid. Max array length is invalid"); - valid = false; - } - if self.limits.max_string_length == 0 { - error!("Server configuration is invalid. Max string length is invalid"); - valid = false; - } - if self.limits.max_byte_string_length == 0 { - error!("Server configuration is invalid. Max byte string length is invalid"); - valid = false; - } - if self.discovery_urls.is_empty() { - error!("Server configuration is invalid. Discovery urls not set"); - valid = false; - } - valid - } - - fn application_name(&self) -> UAString { - UAString::from(&self.application_name) - } - - fn application_uri(&self) -> UAString { - UAString::from(&self.application_uri) - } - - fn product_uri(&self) -> UAString { - UAString::from(&self.product_uri) - } - - fn application_type(&self) -> ApplicationType { - ApplicationType::Server - } - - fn discovery_urls(&self) -> Option> { - let discovery_urls: Vec = - self.discovery_urls.iter().map(UAString::from).collect(); - Some(discovery_urls) - } -} - -impl Default for ServerConfig { - fn default() -> Self { - let mut pki_dir = std::env::current_dir().unwrap(); - pki_dir.push(Self::PKI_DIR); - - ServerConfig { - application_name: String::new(), - application_uri: String::new(), - product_uri: String::new(), - create_sample_keypair: false, - certificate_path: None, - private_key_path: None, - pki_dir, - certificate_validation: CertificateValidation::default(), - discovery_server_url: None, - tcp_config: TcpConfig { - host: "127.0.0.1".to_string(), - port: 
constants::DEFAULT_RUST_OPC_UA_SERVER_PORT, - hello_timeout: constants::DEFAULT_HELLO_TIMEOUT_SECONDS, - }, - limits: Limits::default(), - user_tokens: BTreeMap::new(), - locale_ids: vec!["en".to_string()], - discovery_urls: Vec::new(), - default_endpoint: None, - endpoints: BTreeMap::new(), - performance: Performance { - single_threaded_executor: false, - }, - } - } -} - -impl ServerConfig { - /// The default PKI directory - pub const PKI_DIR: &'static str = "pki"; - - pub fn new( - application_name: T, - user_tokens: BTreeMap, - endpoints: BTreeMap, - ) -> Self - where - T: Into, - { - let host = "127.0.0.1".to_string(); - let port = constants::DEFAULT_RUST_OPC_UA_SERVER_PORT; - - let application_name = application_name.into(); - let application_uri = format!("urn:{}", application_name); - let product_uri = format!("urn:{}", application_name); - let discovery_server_url = Some(constants::DEFAULT_DISCOVERY_SERVER_URL.to_string()); - let discovery_urls = vec![format!("opc.tcp://{}:{}/", host, port)]; - let locale_ids = vec!["en".to_string()]; - - let mut pki_dir = std::env::current_dir().unwrap(); - pki_dir.push(Self::PKI_DIR); - - ServerConfig { - application_name, - application_uri, - product_uri, - create_sample_keypair: false, - certificate_path: None, - private_key_path: None, - certificate_validation: CertificateValidation { - trust_client_certs: false, - check_time: true, - }, - pki_dir, - discovery_server_url, - tcp_config: TcpConfig { - host, - port, - hello_timeout: constants::DEFAULT_HELLO_TIMEOUT_SECONDS, - }, - limits: Limits::default(), - locale_ids, - user_tokens, - discovery_urls, - default_endpoint: None, - endpoints, - performance: Performance { - single_threaded_executor: false, - }, - } - } - - pub fn decoding_options(&self) -> DecodingOptions { - DecodingOptions { - client_offset: chrono::Duration::zero(), - max_message_size: self.limits.max_message_size, - max_chunk_count: self.limits.max_chunk_count, - max_string_length: 
self.limits.max_string_length, - max_byte_string_length: self.limits.max_byte_string_length, - max_array_length: self.limits.max_array_length, - ..Default::default() - } - } - - pub fn add_endpoint(&mut self, id: &str, endpoint: ServerEndpoint) { - self.endpoints.insert(id.to_string(), endpoint); - } - - pub fn read_x509_thumbprints(&mut self) { - self.user_tokens - .iter_mut() - .for_each(|(_, token)| token.read_thumbprint()); - } - - /// Returns a opc.tcp://server:port url that paths can be appended onto - pub fn base_endpoint_url(&self) -> String { - format!( - "opc.tcp://{}:{}", - self.tcp_config.host, self.tcp_config.port - ) - } - - /// Find the default endpoint - pub fn default_endpoint(&self) -> Option<&ServerEndpoint> { - if let Some(ref default_endpoint) = self.default_endpoint { - self.endpoints.get(default_endpoint) - } else { - None - } - } - - /// Find the first endpoint that matches the specified url, security policy and message - /// security mode. - pub fn find_endpoint( - &self, - endpoint_url: &str, - security_policy: SecurityPolicy, - security_mode: MessageSecurityMode, - ) -> Option<&ServerEndpoint> { - let base_endpoint_url = self.base_endpoint_url(); - let endpoint = self.endpoints.iter().find(|&(_, e)| { - // Test end point's security_policy_uri and matching url - if url_matches_except_host(&e.endpoint_url(&base_endpoint_url), endpoint_url) { - if e.security_policy() == security_policy - && e.message_security_mode() == security_mode - { - trace!("Found matching endpoint for url {} - {:?}", endpoint_url, e); - true - } else { - false - } - } else { - false - } - }); - endpoint.map(|endpoint| endpoint.1) - } -} diff --git a/lib/src/server/config/capabilities.rs b/lib/src/server/config/capabilities.rs new file mode 100644 index 000000000..4a2246936 --- /dev/null +++ b/lib/src/server/config/capabilities.rs @@ -0,0 +1,32 @@ +use crate::types::NodeId; + +#[derive(Debug, Clone, Default)] +/// History capabilities. 
+/// As all history is implemented by custom node managers, +/// this should be set according to what your node managers support. +pub struct HistoryServerCapabilities { + pub access_history_data: bool, + pub access_history_events: bool, + pub delete_at_time: bool, + pub delete_event: bool, + pub delete_raw: bool, + pub insert_annotation: bool, + pub insert_data: bool, + pub insert_event: bool, + pub max_return_data_values: u32, + pub max_return_event_values: u32, + pub replace_data: bool, + pub replace_event: bool, + pub server_timestamp_supported: bool, + pub update_data: bool, + pub update_event: bool, + /// Supported history aggregates + pub aggregates: Vec, +} + +#[derive(Debug, Clone, Default)] +/// Server capabilities object. +pub struct ServerCapabilities { + pub history: HistoryServerCapabilities, + pub profiles: Vec, +} diff --git a/lib/src/server/config/endpoint.rs b/lib/src/server/config/endpoint.rs new file mode 100644 index 000000000..621510614 --- /dev/null +++ b/lib/src/server/config/endpoint.rs @@ -0,0 +1,348 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + str::FromStr, +}; + +use crate::{crypto::SecurityPolicy, types::MessageSecurityMode}; + +use super::server::{ServerUserToken, ANONYMOUS_USER_TOKEN_ID}; + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +pub struct ServerEndpoint { + /// Endpoint path + pub path: String, + /// Security policy + pub security_policy: String, + /// Security mode + pub security_mode: String, + /// Security level, higher being more secure + pub security_level: u8, + /// Password security policy when a client supplies a user name identity token + pub password_security_policy: Option, + /// User tokens + pub user_token_ids: BTreeSet, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Hash, Eq)] +pub struct EndpointIdentifier { + /// Endpoint path + pub path: String, + /// Security policy + pub security_policy: String, + /// Security mode + pub security_mode: String, +} + +impl 
From<&ServerEndpoint> for EndpointIdentifier { + fn from(value: &ServerEndpoint) -> Self { + Self { + path: value.path.clone(), + security_policy: value.security_policy.clone(), + security_mode: value.security_mode.clone(), + } + } +} + +/// Convenience method to make an endpoint from a tuple +impl<'a> From<(&'a str, SecurityPolicy, MessageSecurityMode, &'a [&'a str])> for ServerEndpoint { + fn from(v: (&'a str, SecurityPolicy, MessageSecurityMode, &'a [&'a str])) -> ServerEndpoint { + ServerEndpoint { + path: v.0.into(), + security_policy: v.1.to_string(), + security_mode: v.2.to_string(), + security_level: Self::security_level(v.1, v.2), + password_security_policy: None, + user_token_ids: v.3.iter().map(|id| id.to_string()).collect(), + } + } +} + +impl ServerEndpoint { + pub fn new( + path: T, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + user_token_ids: &[String], + ) -> Self + where + T: Into, + { + ServerEndpoint { + path: path.into(), + security_policy: security_policy.to_string(), + security_mode: security_mode.to_string(), + security_level: Self::security_level(security_policy, security_mode), + password_security_policy: None, + user_token_ids: user_token_ids.iter().cloned().collect(), + } + } + + /// Recommends a security level for the supplied security policy + fn security_level(security_policy: SecurityPolicy, security_mode: MessageSecurityMode) -> u8 { + let security_level = match security_policy { + SecurityPolicy::Basic128Rsa15 => 1, + SecurityPolicy::Aes128Sha256RsaOaep => 2, + SecurityPolicy::Basic256 => 3, + SecurityPolicy::Basic256Sha256 => 4, + SecurityPolicy::Aes256Sha256RsaPss => 5, + _ => 0, + }; + if security_mode == MessageSecurityMode::SignAndEncrypt { + security_level + 10 + } else { + security_level + } + } + + pub fn new_none(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::None, + MessageSecurityMode::None, + user_token_ids, + ) + } + + pub fn 
new_basic128rsa15_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_basic128rsa15_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_basic256_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_basic256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_basic256sha256_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_basic256sha256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_aes128_sha256_rsaoaep_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_aes128_sha256_rsaoaep_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_aes256_sha256_rsapss_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn 
new_aes256_sha256_rsapss_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn is_valid(&self, id: &str, user_tokens: &BTreeMap) -> bool { + let mut valid = true; + + // Validate that the user token ids exist + for id in &self.user_token_ids { + // Skip anonymous + if id == ANONYMOUS_USER_TOKEN_ID { + continue; + } + if !user_tokens.contains_key(id) { + error!("Cannot find user token with id {}", id); + valid = false; + } + } + + if let Some(ref password_security_policy) = self.password_security_policy { + let password_security_policy = + SecurityPolicy::from_str(password_security_policy).unwrap(); + if password_security_policy == SecurityPolicy::Unknown { + error!("Endpoint {} is invalid. Password security policy \"{}\" is invalid. Valid values are None, Basic128Rsa15, Basic256, Basic256Sha256", id, password_security_policy); + valid = false; + } + } + + // Validate the security policy and mode + let security_policy = SecurityPolicy::from_str(&self.security_policy).unwrap(); + let security_mode = MessageSecurityMode::from(self.security_mode.as_ref()); + if security_policy == SecurityPolicy::Unknown { + error!("Endpoint {} is invalid. Security policy \"{}\" is invalid. Valid values are None, Basic128Rsa15, Basic256, Basic256Sha256, Aes128Sha256RsaOaep, Aes256Sha256RsaPss,", id, self.security_policy); + valid = false; + } else if security_mode == MessageSecurityMode::Invalid { + error!("Endpoint {} is invalid. Security mode \"{}\" is invalid. Valid values are None, Sign, SignAndEncrypt", id, self.security_mode); + valid = false; + } else if (security_policy == SecurityPolicy::None + && security_mode != MessageSecurityMode::None) + || (security_policy != SecurityPolicy::None + && security_mode == MessageSecurityMode::None) + { + error!("Endpoint {} is invalid. 
Security policy and security mode must both contain None or neither of them should (1).", id); + valid = false; + } else if security_policy != SecurityPolicy::None + && security_mode == MessageSecurityMode::None + { + error!("Endpoint {} is invalid. Security policy and security mode must both contain None or neither of them should (2).", id); + valid = false; + } + valid + } + + pub fn security_policy(&self) -> SecurityPolicy { + SecurityPolicy::from_str(&self.security_policy).unwrap() + } + + pub fn message_security_mode(&self) -> MessageSecurityMode { + MessageSecurityMode::from(self.security_mode.as_ref()) + } + + pub fn endpoint_url(&self, base_endpoint: &str) -> String { + format!("{}{}", base_endpoint, self.path) + } + + /// Returns the effective password security policy for the endpoint. This is the explicitly set password + /// security policy, or just the regular security policy. + pub fn password_security_policy(&self) -> SecurityPolicy { + let mut password_security_policy = self.security_policy(); + if let Some(ref security_policy) = self.password_security_policy { + match SecurityPolicy::from_str(security_policy).unwrap() { + SecurityPolicy::Unknown => { + panic!( + "Password security policy {} is unrecognized", + security_policy + ); + } + security_policy => { + password_security_policy = security_policy; + } + } + } + password_security_policy + } + + /// Test if the endpoint supports anonymous users + pub fn supports_anonymous(&self) -> bool { + self.supports_user_token_id(ANONYMOUS_USER_TOKEN_ID) + } + + /// Tests if this endpoint supports user pass tokens. It does this by looking to see + /// if any of the users allowed to access this endpoint are user pass users. 
+ pub fn supports_user_pass(&self, server_tokens: &BTreeMap) -> bool { + for user_token_id in &self.user_token_ids { + if user_token_id != ANONYMOUS_USER_TOKEN_ID { + if let Some(user_token) = server_tokens.get(user_token_id) { + if user_token.is_user_pass() { + return true; + } + } + } + } + false + } + + /// Tests if this endpoint supports x509 tokens. It does this by looking to see + /// if any of the users allowed to access this endpoint are x509 users. + pub fn supports_x509(&self, server_tokens: &BTreeMap) -> bool { + for user_token_id in &self.user_token_ids { + if user_token_id != ANONYMOUS_USER_TOKEN_ID { + if let Some(user_token) = server_tokens.get(user_token_id) { + if user_token.is_x509() { + return true; + } + } + } + } + false + } + + pub fn supports_user_token_id(&self, id: &str) -> bool { + self.user_token_ids.contains(id) + } +} diff --git a/lib/src/server/config/limits.rs b/lib/src/server/config/limits.rs new file mode 100644 index 000000000..b7822a4a1 --- /dev/null +++ b/lib/src/server/config/limits.rs @@ -0,0 +1,307 @@ +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +pub struct Limits { + /// Max array length in elements + #[serde(default = "defaults::max_array_length")] + pub max_array_length: usize, + /// Max string length in characters + #[serde(default = "defaults::max_string_length")] + pub max_string_length: usize, + /// Max bytestring length in bytes + #[serde(default = "defaults::max_byte_string_length")] + pub max_byte_string_length: usize, + /// Maximum message length in bytes + #[serde(default = "defaults::max_message_size")] + pub max_message_size: usize, + /// Maximum chunk count + #[serde(default = "defaults::max_chunk_count")] + pub max_chunk_count: usize, + /// Send buffer size in bytes + #[serde(default = "defaults::send_buffer_size")] + pub send_buffer_size: usize, + /// Receive buffer size in bytes + #[serde(default = "defaults::receive_buffer_size")] + pub receive_buffer_size: usize, + /// Limits specific to 
subscriptions. + #[serde(default)] + pub subscriptions: SubscriptionLimits, + /// Limits on service calls. + #[serde(default)] + pub operational: OperationalLimits, + /// Maximum number of browse continuation points per session. + #[serde(default = "defaults::max_browse_continuation_points")] + pub max_browse_continuation_points: usize, + /// Maximum number of history continuation points per session. + #[serde(default = "defaults::max_history_continuation_points")] + pub max_history_continuation_points: usize, + /// Maximum number of query continuation points per session. + #[serde(default = "defaults::max_query_continuation_points")] + pub max_query_continuation_points: usize, + /// Maximum number of registered sessions before new ones are rejected. + #[serde(default = "defaults::max_sessions")] + pub max_sessions: usize, +} + +impl Default for Limits { + fn default() -> Self { + Self { + max_array_length: defaults::max_array_length(), + max_string_length: defaults::max_string_length(), + max_byte_string_length: defaults::max_byte_string_length(), + max_message_size: defaults::max_message_size(), + max_chunk_count: defaults::max_chunk_count(), + send_buffer_size: defaults::send_buffer_size(), + receive_buffer_size: defaults::receive_buffer_size(), + subscriptions: Default::default(), + max_browse_continuation_points: defaults::max_browse_continuation_points(), + max_history_continuation_points: defaults::max_history_continuation_points(), + max_query_continuation_points: defaults::max_query_continuation_points(), + operational: OperationalLimits::default(), + max_sessions: defaults::max_sessions(), + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] +pub struct SubscriptionLimits { + #[serde(default = "defaults::max_subscriptions_per_session")] + pub max_subscriptions_per_session: usize, + #[serde(default = "defaults::max_pending_publish_requests")] + pub max_pending_publish_requests: usize, + #[serde(default = 
"defaults::max_publish_requests_per_subscription")] + pub max_publish_requests_per_subscription: usize, + /// Specifies the minimum sampling interval for this server in seconds. + #[serde(default = "defaults::min_sampling_interval_ms")] + pub min_sampling_interval_ms: f64, + /// Specifies the minimum publishing interval for this server in seconds. + #[serde(default = "defaults::min_publishing_interval_ms")] + pub min_publishing_interval_ms: f64, + #[serde(default = "defaults::max_keep_alive_count")] + pub max_keep_alive_count: u32, + #[serde(default = "defaults::default_keep_alive_count")] + pub default_keep_alive_count: u32, + /// Maximum number of monitored items per subscription, 0 for no limit + #[serde(default = "defaults::max_monitored_items_per_sub")] + pub max_monitored_items_per_sub: usize, + /// Maximum number of values in a monitored item queue + #[serde(default = "defaults::max_monitored_item_queue_size")] + pub max_monitored_item_queue_size: usize, + /// Maximum lifetime count (3 times as large as max keep alive) + #[serde(default = "defaults::max_lifetime_count")] + pub max_lifetime_count: u32, + /// Maximum number of notifications per publish message. + #[serde(default = "defaults::max_notifications_per_publish")] + pub max_notifications_per_publish: u64, + /// Maximum number of queued notifications per subscription. 0 for unlimited. 
+ #[serde(default = "defaults::max_queued_notifications")] + pub max_queued_notifications: usize, +} + +impl Default for SubscriptionLimits { + fn default() -> Self { + Self { + max_subscriptions_per_session: defaults::max_subscriptions_per_session(), + max_pending_publish_requests: defaults::max_pending_publish_requests(), + max_publish_requests_per_subscription: defaults::max_publish_requests_per_subscription( + ), + min_sampling_interval_ms: defaults::min_sampling_interval_ms(), + min_publishing_interval_ms: defaults::min_publishing_interval_ms(), + max_keep_alive_count: defaults::max_keep_alive_count(), + default_keep_alive_count: defaults::default_keep_alive_count(), + max_monitored_items_per_sub: defaults::max_monitored_items_per_sub(), + max_monitored_item_queue_size: defaults::max_monitored_item_queue_size(), + max_lifetime_count: defaults::max_lifetime_count(), + max_notifications_per_publish: defaults::max_notifications_per_publish(), + max_queued_notifications: defaults::max_queued_notifications(), + } + } +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +pub struct OperationalLimits { + #[serde(default = "defaults::max_nodes_per_translate_browse_paths_to_node_ids")] + pub max_nodes_per_translate_browse_paths_to_node_ids: usize, + #[serde(default = "defaults::max_nodes_per_read")] + pub max_nodes_per_read: usize, + #[serde(default = "defaults::max_nodes_per_write")] + pub max_nodes_per_write: usize, + #[serde(default = "defaults::max_nodes_per_method_call")] + pub max_nodes_per_method_call: usize, + #[serde(default = "defaults::max_nodes_per_browse")] + pub max_nodes_per_browse: usize, + #[serde(default = "defaults::max_nodes_per_register_nodes")] + pub max_nodes_per_register_nodes: usize, + #[serde(default = "defaults::max_monitored_items_per_call")] + pub max_monitored_items_per_call: usize, + #[serde(default = "defaults::max_nodes_per_history_read_data")] + pub max_nodes_per_history_read_data: usize, + #[serde(default = 
"defaults::max_nodes_per_history_read_events")] + pub max_nodes_per_history_read_events: usize, + #[serde(default = "defaults::max_nodes_per_history_update")] + pub max_nodes_per_history_update: usize, + #[serde(default = "defaults::max_references_per_browse_node")] + pub max_references_per_browse_node: usize, + #[serde(default = "defaults::max_node_descs_per_query")] + pub max_node_descs_per_query: usize, + #[serde(default = "defaults::max_data_sets_query_return")] + pub max_data_sets_query_return: usize, + #[serde(default = "defaults::max_references_query_return")] + pub max_references_query_return: usize, + #[serde(default = "defaults::max_nodes_per_node_management")] + pub max_nodes_per_node_management: usize, + #[serde(default = "defaults::max_references_per_references_management")] + pub max_references_per_references_management: usize, + #[serde(default = "defaults::max_subscriptions_per_call")] + pub max_subscriptions_per_call: usize, +} + +impl Default for OperationalLimits { + fn default() -> Self { + Self { + max_nodes_per_translate_browse_paths_to_node_ids: + defaults::max_nodes_per_translate_browse_paths_to_node_ids(), + max_nodes_per_read: defaults::max_nodes_per_read(), + max_nodes_per_write: defaults::max_nodes_per_write(), + max_nodes_per_method_call: defaults::max_nodes_per_method_call(), + max_nodes_per_browse: defaults::max_nodes_per_browse(), + max_nodes_per_register_nodes: defaults::max_nodes_per_register_nodes(), + max_monitored_items_per_call: defaults::max_monitored_items_per_call(), + max_nodes_per_history_read_data: defaults::max_nodes_per_history_read_data(), + max_nodes_per_history_read_events: defaults::max_nodes_per_history_read_events(), + max_nodes_per_history_update: defaults::max_nodes_per_history_update(), + max_references_per_browse_node: defaults::max_references_per_browse_node(), + max_node_descs_per_query: defaults::max_node_descs_per_query(), + max_data_sets_query_return: defaults::max_data_sets_query_return(), + 
max_references_query_return: defaults::max_references_query_return(), + max_nodes_per_node_management: defaults::max_nodes_per_node_management(), + max_references_per_references_management: + defaults::max_references_per_references_management(), + max_subscriptions_per_call: defaults::max_subscriptions_per_call(), + } + } +} + +mod defaults { + use crate::server::constants; + pub fn max_array_length() -> usize { + crate::types::constants::MAX_ARRAY_LENGTH + } + pub fn max_string_length() -> usize { + crate::types::constants::MAX_STRING_LENGTH + } + pub fn max_byte_string_length() -> usize { + crate::types::constants::MAX_BYTE_STRING_LENGTH + } + pub fn max_message_size() -> usize { + crate::types::constants::MAX_MESSAGE_SIZE + } + pub fn max_chunk_count() -> usize { + crate::types::constants::MAX_CHUNK_COUNT + } + pub fn send_buffer_size() -> usize { + constants::SEND_BUFFER_SIZE + } + pub fn receive_buffer_size() -> usize { + constants::RECEIVE_BUFFER_SIZE + } + pub fn max_browse_continuation_points() -> usize { + constants::MAX_BROWSE_CONTINUATION_POINTS + } + pub fn max_history_continuation_points() -> usize { + constants::MAX_HISTORY_CONTINUATION_POINTS + } + pub fn max_query_continuation_points() -> usize { + constants::MAX_QUERY_CONTINUATION_POINTS + } + pub fn max_sessions() -> usize { + constants::MAX_SESSIONS + } + + pub fn max_subscriptions_per_session() -> usize { + constants::MAX_SUBSCRIPTIONS_PER_SESSION + } + pub fn max_pending_publish_requests() -> usize { + constants::MAX_PENDING_PUBLISH_REQUESTS + } + pub fn max_publish_requests_per_subscription() -> usize { + constants::MAX_PUBLISH_REQUESTS_PER_SUBSCRIPTION + } + pub fn min_sampling_interval_ms() -> f64 { + constants::MIN_SAMPLING_INTERVAL_MS + } + pub fn min_publishing_interval_ms() -> f64 { + constants::MIN_PUBLISHING_INTERVAL_MS + } + pub fn max_keep_alive_count() -> u32 { + constants::MAX_KEEP_ALIVE_COUNT + } + pub fn default_keep_alive_count() -> u32 { + constants::DEFAULT_KEEP_ALIVE_COUNT + 
} + pub fn max_monitored_items_per_sub() -> usize { + constants::DEFAULT_MAX_MONITORED_ITEMS_PER_SUB + } + pub fn max_monitored_item_queue_size() -> usize { + constants::MAX_DATA_CHANGE_QUEUE_SIZE + } + pub fn max_lifetime_count() -> u32 { + constants::MAX_KEEP_ALIVE_COUNT * 3 + } + pub fn max_notifications_per_publish() -> u64 { + constants::MAX_NOTIFICATIONS_PER_PUBLISH + } + pub fn max_queued_notifications() -> usize { + constants::MAX_QUEUED_NOTIFICATIONS + } + + pub fn max_nodes_per_translate_browse_paths_to_node_ids() -> usize { + constants::MAX_NODES_PER_TRANSLATE_BROWSE_PATHS_TO_NODE_IDS + } + pub fn max_nodes_per_read() -> usize { + constants::MAX_NODES_PER_READ + } + pub fn max_nodes_per_write() -> usize { + constants::MAX_NODES_PER_WRITE + } + pub fn max_nodes_per_method_call() -> usize { + constants::MAX_NODES_PER_METHOD_CALL + } + pub fn max_nodes_per_browse() -> usize { + constants::MAX_NODES_PER_BROWSE + } + pub fn max_nodes_per_register_nodes() -> usize { + constants::MAX_NODES_PER_REGISTER_NODES + } + pub fn max_monitored_items_per_call() -> usize { + constants::MAX_MONITORED_ITEMS_PER_CALL + } + pub fn max_nodes_per_history_read_data() -> usize { + constants::MAX_NODES_PER_HISTORY_READ_DATA + } + pub fn max_nodes_per_history_read_events() -> usize { + constants::MAX_NODES_PER_HISTORY_READ_EVENTS + } + pub fn max_nodes_per_history_update() -> usize { + constants::MAX_NODES_PER_HISTORY_UPDATE + } + pub fn max_references_per_browse_node() -> usize { + constants::MAX_REFERENCES_PER_BROWSE_NODE + } + pub fn max_node_descs_per_query() -> usize { + constants::MAX_NODE_DESCS_PER_QUERY + } + pub fn max_data_sets_query_return() -> usize { + constants::MAX_DATA_SETS_QUERY_RETURN + } + pub fn max_references_query_return() -> usize { + constants::MAX_REFERENCES_QUERY_RETURN + } + pub fn max_nodes_per_node_management() -> usize { + constants::MAX_NODES_PER_NODE_MANAGEMENT + } + pub fn max_references_per_references_management() -> usize { + 
constants::MAX_REFERENCES_PER_REFERENCE_MANAGEMENT + } + pub fn max_subscriptions_per_call() -> usize { + constants::MAX_SUBSCRIPTIONS_PER_CALL + } +} diff --git a/lib/src/server/config/mod.rs b/lib/src/server/config/mod.rs new file mode 100644 index 000000000..4519e4608 --- /dev/null +++ b/lib/src/server/config/mod.rs @@ -0,0 +1,9 @@ +mod capabilities; +mod endpoint; +mod limits; +mod server; + +pub use capabilities::{HistoryServerCapabilities, ServerCapabilities}; +pub use endpoint::{EndpointIdentifier, ServerEndpoint}; +pub use limits::{Limits, OperationalLimits, SubscriptionLimits}; +pub use server::{ServerConfig, ServerUserToken, ANONYMOUS_USER_TOKEN_ID}; diff --git a/lib/src/server/config/server.rs b/lib/src/server/config/server.rs new file mode 100644 index 000000000..78d9b14bf --- /dev/null +++ b/lib/src/server/config/server.rs @@ -0,0 +1,468 @@ +// OPCUA for Rust +// SPDX-License-Identifier: MPL-2.0 +// Copyright (C) 2017-2024 Adam Lock + +//! Provides configuration settings for the server including serialization and deserialization from file. 
+ +use std::{ + collections::BTreeMap, + path::{Path, PathBuf}, +}; + +use crate::{ + core::{comms::url::url_matches_except_host, config::Config}, + crypto::{CertificateStore, SecurityPolicy, Thumbprint}, + server::constants, + types::{service_types::ApplicationType, DecodingOptions, MessageSecurityMode, UAString}, + types::{ApplicationDescription, LocalizedText}, +}; + +use super::{endpoint::ServerEndpoint, limits::Limits}; + +pub const ANONYMOUS_USER_TOKEN_ID: &str = "ANONYMOUS"; + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub struct TcpConfig { + /// Timeout for hello on a session in seconds + pub hello_timeout: u32, + /// The hostname to supply in the endpoints + pub host: String, + /// The port number of the service + pub port: u16, +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)] +pub struct ServerUserToken { + /// User name + pub user: String, + /// Password + #[serde(skip_serializing_if = "Option::is_none")] + pub pass: Option, + // X509 file path (as a string) + #[serde(skip_serializing_if = "Option::is_none")] + pub x509: Option, + #[serde(skip)] + pub thumbprint: Option, +} + +impl ServerUserToken { + /// Create a user pass token + pub fn user_pass(user: T, pass: T) -> Self + where + T: Into, + { + ServerUserToken { + user: user.into(), + pass: Some(pass.into()), + x509: None, + thumbprint: None, + } + } + + /// Create an X509 token. + pub fn x509(user: T, cert_path: &Path) -> Self + where + T: Into, + { + ServerUserToken { + user: user.into(), + pass: None, + x509: Some(cert_path.to_string_lossy().to_string()), + thumbprint: None, + } + } + + /// Read an X509 user token's certificate from disk and then hold onto the thumbprint for it. + pub fn read_thumbprint(&mut self) { + if self.is_x509() && self.thumbprint.is_none() { + // As part of validation, we're going to try and load the x509 certificate from disk, and + // obtain its thumbprint. This will be used when a session is activated. 
+ if let Some(ref x509_path) = self.x509 { + let path = PathBuf::from(x509_path); + if let Ok(x509) = CertificateStore::read_cert(&path) { + self.thumbprint = Some(x509.thumbprint()); + } + } + } + } + + /// Test if the token is valid. This does not care for x509 tokens if the cert is present on + /// the disk or not. + pub fn is_valid(&self, id: &str) -> bool { + let mut valid = true; + if id == ANONYMOUS_USER_TOKEN_ID { + error!( + "User token {} is invalid because id is a reserved value, use another value.", + id + ); + valid = false; + } + if self.user.is_empty() { + error!("User token {} has an empty user name.", id); + valid = false; + } + if self.pass.is_some() && self.x509.is_some() { + error!( + "User token {} holds a password and certificate info - it cannot be both.", + id + ); + valid = false; + } else if self.pass.is_none() && self.x509.is_none() { + error!( + "User token {} fails to provide a password or certificate info.", + id + ); + valid = false; + } + valid + } + + pub fn is_user_pass(&self) -> bool { + self.x509.is_none() + } + + pub fn is_x509(&self) -> bool { + self.x509.is_some() + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +pub struct CertificateValidation { + /// Auto trusts client certificates. For testing/samples only unless you're sure what you're + /// doing. + pub trust_client_certs: bool, + /// Check the valid from/to fields of a certificate + pub check_time: bool, +} + +impl Default for CertificateValidation { + fn default() -> Self { + Self { + trust_client_certs: false, + check_time: true, + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] +pub struct ServerConfig { + /// An id for this server + pub application_name: String, + /// A description for this server + pub application_uri: String, + /// Product url + pub product_uri: String, + /// Autocreates public / private keypair if they don't exist. 
For testing/samples only + /// since you do not have control of the values + #[serde(default)] + pub create_sample_keypair: bool, + /// Path to a custom certificate, to be used instead of the default .der certificate + #[serde(default)] + pub certificate_path: Option, + /// Path to a custom private key, to be used instead of the default private key + #[serde(default)] + pub private_key_path: Option, + /// Checks the certificate's time validity + #[serde(default)] + pub certificate_validation: CertificateValidation, + /// PKI folder, either absolute or relative to executable + pub pki_dir: PathBuf, + /// Url to a discovery server - adding this string causes the server to assume you wish to + /// register the server with a discovery server. + #[serde(default)] + pub discovery_server_url: Option, + /// tcp configuration information + pub tcp_config: TcpConfig, + /// Server OPA UA limits + #[serde(default)] + pub limits: Limits, + /// Supported locale ids + #[serde(default)] + pub locale_ids: Vec, + /// User tokens + pub user_tokens: BTreeMap, + /// discovery endpoint url which may or may not be the same as the service endpoints below. + pub discovery_urls: Vec, + /// Default endpoint id + #[serde(default)] + pub default_endpoint: Option, + /// Endpoints supported by the server + pub endpoints: BTreeMap, + /// Interval in milliseconds between each time the subscriptions are polled. + #[serde(default = "defaults::subscription_poll_interval_ms")] + pub subscription_poll_interval_ms: u64, + /// Default publish request timeout. + #[serde(default = "defaults::publish_timeout_default_ms")] + pub publish_timeout_default_ms: u64, + /// Max message timeout for non-publish requests. + /// Will not be applied for requests that are handled synchronously. + /// Set to 0 for no timeout, meaning that a timeout will only be applied if + /// the client requests one. + /// If this is greater than zero and the client requests a timeout of 0, + /// this will be used. 
+ #[serde(default = "defaults::max_timeout_ms")] + pub max_timeout_ms: u32, + /// Maximum lifetime of secure channel tokens. The client will request a number, + /// this just sets an upper limit on that value. + /// Note that there is no lower limit, if a client sets an expiry of 0, + /// we will just instantly time out. + #[serde(default = "defaults::max_secure_channel_token_lifetime_ms")] + pub max_secure_channel_token_lifetime_ms: u32, + /// Maximum time before a session will be timed out. The client will request + /// a number, this just sets the upper limit on that value. + /// Note that there is no lower limit, if a client sets an expiry of 0 + /// we will instantly time out. + #[serde(default = "defaults::max_session_timeout_ms")] + pub max_session_timeout_ms: u64, +} + +mod defaults { + use crate::server::constants; + + pub fn subscription_poll_interval_ms() -> u64 { + constants::SUBSCRIPTION_TIMER_RATE_MS + } + + pub fn publish_timeout_default_ms() -> u64 { + constants::DEFAULT_PUBLISH_TIMEOUT_MS + } + + pub fn max_timeout_ms() -> u32 { + 300_000 + } + + pub fn max_secure_channel_token_lifetime_ms() -> u32 { + 300_000 + } + + pub fn max_session_timeout_ms() -> u64 { + constants::MAX_SESSION_TIMEOUT + } +} + +impl Config for ServerConfig { + fn is_valid(&self) -> bool { + let mut valid = true; + if self.application_name.is_empty() { + warn!("No application was set"); + } + if self.application_uri.is_empty() { + warn!("No application uri was set"); + } + if self.product_uri.is_empty() { + warn!("No product uri was set"); + } + if self.endpoints.is_empty() { + error!("Server configuration is invalid. 
It defines no endpoints"); + valid = false; + } + for (id, endpoint) in &self.endpoints { + if !endpoint.is_valid(id, &self.user_tokens) { + valid = false; + } + } + if let Some(ref default_endpoint) = self.default_endpoint { + if !self.endpoints.contains_key(default_endpoint) { + valid = false; + } + } + for (id, user_token) in &self.user_tokens { + if !user_token.is_valid(id) { + valid = false; + } + } + if self.limits.max_array_length == 0 { + error!("Server configuration is invalid. Max array length is invalid"); + valid = false; + } + if self.limits.max_string_length == 0 { + error!("Server configuration is invalid. Max string length is invalid"); + valid = false; + } + if self.limits.max_byte_string_length == 0 { + error!("Server configuration is invalid. Max byte string length is invalid"); + valid = false; + } + if self.discovery_urls.is_empty() { + error!("Server configuration is invalid. Discovery urls not set"); + valid = false; + } + valid + } + + fn application_name(&self) -> UAString { + UAString::from(&self.application_name) + } + + fn application_uri(&self) -> UAString { + UAString::from(&self.application_uri) + } + + fn product_uri(&self) -> UAString { + UAString::from(&self.product_uri) + } + + fn application_type(&self) -> ApplicationType { + ApplicationType::Server + } + + fn discovery_urls(&self) -> Option> { + let discovery_urls: Vec = + self.discovery_urls.iter().map(UAString::from).collect(); + Some(discovery_urls) + } + + fn application_description(&self) -> ApplicationDescription { + ApplicationDescription { + application_uri: self.application_uri(), + application_name: LocalizedText::new("", self.application_name().as_ref()), + application_type: self.application_type(), + product_uri: self.product_uri(), + gateway_server_uri: UAString::null(), + discovery_profile_uri: UAString::null(), + discovery_urls: self.discovery_urls(), + } + } +} + +impl Default for ServerConfig { + fn default() -> Self { + let mut pki_dir = 
std::env::current_dir().unwrap(); + pki_dir.push(Self::PKI_DIR); + + ServerConfig { + application_name: String::new(), + application_uri: String::new(), + product_uri: String::new(), + create_sample_keypair: false, + certificate_path: None, + private_key_path: None, + pki_dir, + certificate_validation: CertificateValidation::default(), + discovery_server_url: None, + tcp_config: TcpConfig { + host: "127.0.0.1".to_string(), + port: constants::DEFAULT_RUST_OPC_UA_SERVER_PORT, + hello_timeout: constants::DEFAULT_HELLO_TIMEOUT_SECONDS, + }, + limits: Limits::default(), + user_tokens: BTreeMap::new(), + locale_ids: vec!["en".to_string()], + discovery_urls: Vec::new(), + default_endpoint: None, + endpoints: BTreeMap::new(), + subscription_poll_interval_ms: defaults::subscription_poll_interval_ms(), + publish_timeout_default_ms: defaults::publish_timeout_default_ms(), + max_timeout_ms: defaults::max_timeout_ms(), + max_secure_channel_token_lifetime_ms: defaults::max_secure_channel_token_lifetime_ms(), + max_session_timeout_ms: defaults::max_session_timeout_ms(), + } + } +} + +impl ServerConfig { + /// The default PKI directory + pub const PKI_DIR: &'static str = "pki"; + + pub fn new( + application_name: T, + user_tokens: BTreeMap, + endpoints: BTreeMap, + ) -> Self + where + T: Into, + { + let host = "127.0.0.1".to_string(); + let port = constants::DEFAULT_RUST_OPC_UA_SERVER_PORT; + + let application_name = application_name.into(); + let application_uri = format!("urn:{}", application_name); + let product_uri = format!("urn:{}", application_name); + let discovery_server_url = Some(constants::DEFAULT_DISCOVERY_SERVER_URL.to_string()); + let discovery_urls = vec![format!("opc.tcp://{}:{}/", host, port)]; + let locale_ids = vec!["en".to_string()]; + + let mut pki_dir = std::env::current_dir().unwrap(); + pki_dir.push(Self::PKI_DIR); + + ServerConfig { + application_name, + application_uri, + product_uri, + certificate_validation: CertificateValidation { + 
trust_client_certs: false, + check_time: true, + }, + pki_dir, + discovery_server_url, + tcp_config: TcpConfig { + host, + port, + hello_timeout: constants::DEFAULT_HELLO_TIMEOUT_SECONDS, + }, + locale_ids, + user_tokens, + discovery_urls, + endpoints, + ..Default::default() + } + } + + pub fn decoding_options(&self) -> DecodingOptions { + DecodingOptions { + client_offset: chrono::Duration::zero(), + max_message_size: self.limits.max_message_size, + max_chunk_count: self.limits.max_chunk_count, + max_string_length: self.limits.max_string_length, + max_byte_string_length: self.limits.max_byte_string_length, + max_array_length: self.limits.max_array_length, + ..Default::default() + } + } + + pub fn add_endpoint(&mut self, id: &str, endpoint: ServerEndpoint) { + self.endpoints.insert(id.to_string(), endpoint); + } + + pub fn read_x509_thumbprints(&mut self) { + self.user_tokens + .iter_mut() + .for_each(|(_, token)| token.read_thumbprint()); + } + + /// Find the default endpoint + pub fn default_endpoint(&self) -> Option<&ServerEndpoint> { + if let Some(ref default_endpoint) = self.default_endpoint { + self.endpoints.get(default_endpoint) + } else { + None + } + } + + /// Find the first endpoint that matches the specified url, security policy and message + /// security mode. 
+ pub fn find_endpoint( + &self, + endpoint_url: &str, + base_endpoint_url: &str, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + ) -> Option<&ServerEndpoint> { + let endpoint = self.endpoints.iter().find(|&(_, e)| { + // Test end point's security_policy_uri and matching url + if url_matches_except_host(&e.endpoint_url(&base_endpoint_url), endpoint_url) { + if e.security_policy() == security_policy + && e.message_security_mode() == security_mode + { + trace!("Found matching endpoint for url {} - {:?}", endpoint_url, e); + true + } else { + false + } + } else { + false + } + }); + endpoint.map(|endpoint| endpoint.1) + } +} diff --git a/lib/src/server/continuation_point.rs b/lib/src/server/continuation_point.rs deleted file mode 100644 index 60241ca20..000000000 --- a/lib/src/server/continuation_point.rs +++ /dev/null @@ -1,29 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides a browse continuation point type for tracking a browse operation initiated by a client. - -use std::sync::Arc; - -use crate::sync::*; -use crate::types::{service_types::ReferenceDescription, ByteString, DateTimeUtc}; - -use crate::prelude::AddressSpace; - -#[derive(Clone, Debug)] -pub struct BrowseContinuationPoint { - pub id: ByteString, - pub address_space_last_modified: DateTimeUtc, - pub max_references_per_node: usize, - pub starting_index: usize, - pub reference_descriptions: Arc>>, -} - -impl BrowseContinuationPoint { - /// Test if the continuation point valid which is only true if address space has not been - /// modified since the point was made. 
- pub fn is_valid_browse_continuation_point(&self, address_space: &AddressSpace) -> bool { - self.address_space_last_modified >= address_space.last_modified() - } -} diff --git a/lib/src/server/diagnostics.rs b/lib/src/server/diagnostics.rs deleted file mode 100644 index 436df65e5..000000000 --- a/lib/src/server/diagnostics.rs +++ /dev/null @@ -1,133 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides diagnostics structures and functions for gathering information about the running -//! state of a server. - -use crate::types::service_types::ServerDiagnosticsSummaryDataType; -use crate::{deregister_runtime_component, register_runtime_component}; - -use crate::core::RUNTIME; - -use super::{session::Session, subscriptions::subscription::Subscription}; - -/// Structure that captures di agnostics information for the server -#[derive(Clone, Serialize, Debug)] -pub struct ServerDiagnostics { - /// This is a live summary of the server diagnostics - server_diagnostics_summary: ServerDiagnosticsSummaryDataType, -} - -const SERVER_DIAGNOSTICS: &str = "ServerDiagnostics"; - -impl Default for ServerDiagnostics { - fn default() -> Self { - register_runtime_component!(SERVER_DIAGNOSTICS); - Self { - server_diagnostics_summary: ServerDiagnosticsSummaryDataType::default(), - } - } -} - -impl Drop for ServerDiagnostics { - fn drop(&mut self) { - deregister_runtime_component!(SERVER_DIAGNOSTICS); - } -} - -impl ServerDiagnostics { - /// Return a completed summary of the server diagnostics as they stand. This structure - /// is used to fill the address space stats about the server. - pub fn server_diagnostics_summary(&self) -> &ServerDiagnosticsSummaryDataType { - &self.server_diagnostics_summary - } - - /// Increment the number of requests that were rejected due to security constraints since the server was - /// started (or restarted). 
The requests include all Services defined in Part 4, also requests - /// to create sessions. - pub(crate) fn on_rejected_security_session(&mut self) { - self.server_diagnostics_summary - .security_rejected_session_count += 1; - } - - /// Increment the number of requests that were rejected since the server was started (or restarted). The - /// requests include all Services defined in Part 4, also requests to create sessions. This - /// number includes the securityRejectedRequestsCount. - pub(crate) fn on_rejected_session(&mut self) { - self.server_diagnostics_summary.rejected_session_count += 1; - } - - /// Increment the number of client sessions currently established in the server. - pub(crate) fn on_create_session(&mut self, _session: &Session) { - self.server_diagnostics_summary.current_session_count += 1; - self.server_diagnostics_summary.cumulated_session_count += 1; - debug!( - "Incrementing current session count to {}", - self.server_diagnostics_summary.current_session_count - ); - } - - /// Decrement the number of client sessions currently established in the server. - pub(crate) fn on_destroy_session(&mut self, _session: &Session) { - self.server_diagnostics_summary.current_session_count -= 1; - debug!( - "Decrementing current session count to {}", - self.server_diagnostics_summary.current_session_count - ); - } - - /// Increment the number of subscriptions currently established in the server. - pub(crate) fn on_create_subscription(&mut self, _subscription: &Subscription) { - self.server_diagnostics_summary.current_subscription_count += 1; - self.server_diagnostics_summary.cumulated_subscription_count += 1; - } - - /// Decrement the number of subscriptions currently established in the server. 
- pub(crate) fn on_destroy_subscription(&mut self, _subscription: &Subscription) { - self.server_diagnostics_summary.current_subscription_count -= 1; - } - - /// Increment the number of client sessions that were closed due to timeout since the server was started (or restarted). - pub(crate) fn on_session_timeout(&mut self) { - self.server_diagnostics_summary.session_timeout_count += 1; - } - - // --- These are not yet called by anything - - /* - /// Increment the number of server-created views in the server. - pub(crate) fn on_server_view(&mut self, _session: &Session) { - self.server_diagnostics_summary.server_view_count += 1; - unimplemented!(); - } - - /// Increment the number of client sessions that were closed due to errors since the server was started (or restarted). - pub(crate) fn on_session_abort(&mut self, _session: &Session) { - self.server_diagnostics_summary.session_abort_count += 1; - unimplemented!() - } - - /// Increment the number of publishing intervals currently supported in the server. - pub(crate) fn on_publishing_interval(&mut self) { - self.server_diagnostics_summary.publishing_interval_count += 1; - unimplemented!() - } - - /// Increment the number of requests that were rejected due to security constraints since the server was - /// started (or restarted). The requests include all Services defined in Part 4, also requests - /// to create sessions. - pub fn on_security_rejected_request(&mut self) { - self.server_diagnostics_summary.security_rejected_requests_count += 1; - unimplemented!() - } - - /// Increment the number of requests that were rejected since the server was started (or restarted). The - /// requests include all Services defined in Part 4, also requests to create sessions. This - /// number includes the securityRejectedRequestsCount. 
- pub(crate) fn on_rejected_request(&mut self) { - self.server_diagnostics_summary.rejected_requests_count += 1; - unimplemented!() - } - */ -} diff --git a/lib/src/server/discovery.rs b/lib/src/server/discovery.rs new file mode 100644 index 000000000..4271e31e3 --- /dev/null +++ b/lib/src/server/discovery.rs @@ -0,0 +1,108 @@ +use std::{path::PathBuf, time::Duration}; + +use crate::{ + client::{Client, ClientBuilder}, + types::RegisteredServer, +}; + +use futures::never::Never; + +#[cfg(windows)] +fn lds_pki_dir() -> String { + if let Ok(mut pki_dir) = std::env::var("ALLUSERSPROFILE") { + pki_dir.push_str(r#"\OPC Foundation\UA\pki"#); + pki_dir + } else { + r#"C:\ProgramData\OPC Foundation\UA\pki"#.to_string() + } +} + +#[cfg(not(windows))] +fn lds_pki_dir() -> String { + "/opt/opcfoundation/ualds/pki".to_owned() +} + +async fn register_with_discovery_server( + client: &mut Client, + discovery_server_url: &str, + registered_server: RegisteredServer, +) { + match client.find_servers(discovery_server_url).await { + Ok(servers) => { + debug!("Servers on the discovery endpoint - {:?}", servers); + match client + .register_server(discovery_server_url, registered_server) + .await + { + Ok(_) => {} + Err(err) => { + error!( + r#"Cannot register server with discovery server \"{}\". +The errors immediately preceding this message may be caused by this issue. +Check if the error "{}" indicates the reason why that the registration could not happen. + +Check that your server can connect to the discovery server and that your server's cert is trusted by +the discovery server and vice versa. 
The default discovery server PKI directory is {}."#, + discovery_server_url, + err, + lds_pki_dir() + ); + } + } + } + Err(err) => { + error!( + "Cannot find servers on discovery url {}, error = {}", + discovery_server_url, err + ); + } + } +} + +#[cfg(not(feature = "discovery-server-registration"))] +fn periodic_discovery_server_registration( + discovery_server_url: &str, + _registered_server: RegisteredServer, + _pki_dir: PathBuf, + _interval: Duration, +) -> Never { + info!( + "Discovery server registration is disabled, registration with {} will not happen", + discovery_server_url + ); + futures::future::pending().await; +} + +#[cfg(feature = "discovery-server-registration")] +pub(crate) async fn periodic_discovery_server_registration( + discovery_server_url: &str, + registered_server: RegisteredServer, + pki_dir: PathBuf, + interval: Duration, +) -> Never { + let mut interval = tokio::time::interval(interval); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let client = ClientBuilder::new() + .application_name("DiscoveryClient") + .application_uri("urn:DiscoveryClient") + .pki_dir(pki_dir) + .session_retry_limit(1) + .client(); + + let Some(mut client) = client else { + error!("Failed to create a valid client for discovery server registration"); + return futures::future::pending().await; + }; + + loop { + interval.tick().await; + + register_with_discovery_server( + &mut client, + discovery_server_url, + registered_server.clone(), + ) + .await; + } +} diff --git a/lib/src/server/discovery/mod.rs b/lib/src/server/discovery/mod.rs deleted file mode 100644 index 20e3e1b38..000000000 --- a/lib/src/server/discovery/mod.rs +++ /dev/null @@ -1,102 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::path::PathBuf; - -use crate::client::ClientBuilder; - -use super::prelude::RegisteredServer; - -// Note these two functions are presently informational, but in the future they could 
-// be used to automatically set up trust between LDS and server if the server -// were set via configuration to do that. - -/// Returns the directory where the UA-LDS service stores its certs on Windows -fn windows_lds_pki_dir() -> String { - /// Default derived from https://github.com/OPCFoundation/UA-LDS/blob/master/win32/platform.c - const WINDOWS_LDS_PKI_DIR: &str = r#"C:\ProgramData\OPC Foundation\UA\pki"#; - - if cfg!(windows) { - // On Windows the logic can check the environment variable like UA-LDS does - if let Ok(mut pki_dir) = std::env::var("ALLUSERSPROFILE") { - pki_dir.push_str(r#"\OPC Foundation\UA\pki"#); - pki_dir - } else { - WINDOWS_LDS_PKI_DIR.to_string() - } - } else { - WINDOWS_LDS_PKI_DIR.to_string() - } -} - -/// Returns the directory where the UA-LDS service stores its certs on Linux -fn linux_lds_pki_dir() -> String { - /// Derived from https://github.com/OPCFoundation/UA-LDS/blob/master/linux/platform.c - const LINUX_LDS_PKI_DIR: &str = "/opt/opcfoundation/ualds/pki"; - LINUX_LDS_PKI_DIR.to_string() -} - -/// Registers the specified endpoints with the specified discovery server -pub async fn register_with_discovery_server( - discovery_server_url: &str, - registered_server: RegisteredServer, - pki_dir: PathBuf, -) { - debug!( - "register_with_discovery_server, for {}", - discovery_server_url - ); - - // Create a client, ensuring to retry only once - let client = ClientBuilder::new() - .application_name("DiscoveryClient") - .application_uri("urn:DiscoveryClient") - .pki_dir(pki_dir) - .session_retry_limit(1) - .client(); - - if let Some(mut client) = client { - // This follows the local discovery process described in part 12 of the spec, calling - // find_servers on it first. 
- - // Connect to the server and call find_servers to ensure it is a discovery server - match client.find_servers(discovery_server_url).await { - Ok(servers) => { - debug!("Servers on the discovery endpoint - {:?}", servers); - // Register the server - match client - .register_server(discovery_server_url, registered_server) - .await - { - Ok(_) => {} - Err(err) => { - error!( - r#"Cannot register server with discovery server \"{}\". -The errors immediately preceding this message may be caused by this issue. -Check if the error "{}" indicates the reason why that the registration could not happen. - -Check that your server can connect to the discovery server and that your server's cert is trusted by -the discovery server and vice versa. The discovery server's PKI directory is (Windows) -{} or (Linux) {}."#, - discovery_server_url, - err, - windows_lds_pki_dir(), - linux_lds_pki_dir() - ); - } - } - } - Err(err) => { - error!( - "Cannot find servers on discovery url {}, error = {:?}", - discovery_server_url, err - ); - } - } - } else { - error!("Cannot create a discovery server client config"); - } - - debug!("register_with_discovery_server, finished"); -} diff --git a/lib/src/server/events/audit/cancel_event.rs b/lib/src/server/events/audit/cancel_event.rs deleted file mode 100644 index f3e23a92f..000000000 --- a/lib/src/server/events/audit/cancel_event.rs +++ /dev/null @@ -1,56 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::types::*; - -use crate::server::{address_space::address_space::AddressSpace, events::event::Event}; - -use super::{session_events::AuditSessionEventType, AuditEvent}; - -pub struct AuditCancelEventType { - base: AuditSessionEventType, - request_handle: u32, -} - -impl AuditEvent for AuditCancelEventType { - fn event_type_id() -> NodeId { - ObjectTypeId::AuditCancelEventType.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - -impl Event for 
AuditCancelEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - let node_id = self.base.raise(address_space)?; - let ns = node_id.namespace; - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "RequestHandle", - "RequestHandle", - DataTypeId::UInt32, - self.request_handle, - address_space, - ); - Ok(node_id) - } -} - -audit_session_event_impl!(AuditCancelEventType, base); - -impl AuditCancelEventType { - pub fn request_handle(mut self, request_handle: u32) -> Self { - self.request_handle = request_handle; - self - } -} diff --git a/lib/src/server/events/audit/certificate_events.rs b/lib/src/server/events/audit/certificate_events.rs deleted file mode 100644 index ddebb30ed..000000000 --- a/lib/src/server/events/audit/certificate_events.rs +++ /dev/null @@ -1,145 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::types::*; - -use crate::server::{address_space::address_space::AddressSpace, events::event::Event}; - -use super::{security_event::AuditSecurityEventType, AuditEvent}; - -pub struct AuditCertificateEventType { - base: AuditSecurityEventType, - certificate: ByteString, -} - -impl Event for AuditCertificateEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - let node_id = self.base.raise(address_space)?; - let ns = node_id.namespace; - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "Certificate", - "Certificate", - DataTypeId::ByteString, - self.certificate.clone(), - address_space, - ); - Ok(node_id) - } -} - -impl AuditEvent for AuditCertificateEventType { - fn event_type_id() -> NodeId { - ObjectTypeId::AuditCertificateEventType.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - 
-audit_security_event_impl!(AuditCertificateEventType, base); - -impl AuditCertificateEventType { - pub fn new( - node_id: R, - event_type_id: E, - browse_name: S, - display_name: T, - time: DateTime, - ) -> Self - where - R: Into, - E: Into, - S: Into, - T: Into, - { - Self { - base: AuditSecurityEventType::new( - node_id, - event_type_id, - browse_name, - display_name, - time, - ), - certificate: ByteString::null(), - } - } - - pub fn certificate(mut self, certificate: ByteString) -> Self { - self.certificate = certificate; - self - } -} - -/// All the AuditCertificateXXXEventType derived frmo AuditCertificateEventType can be implemented from a macro -macro_rules! audit_certificate_event_impl { - ( $event:ident ) => { - audit_security_event_impl!($event, base); - - pub struct $event { - base: AuditCertificateEventType, - } - - impl Event for $event { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - self.base.raise(address_space) - } - } - - impl AuditEvent for $event { - fn event_type_id() -> NodeId { - ObjectTypeId::$event.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } - } - - impl $event { - pub fn new(node_id: R, time: DateTime) -> Self - where - R: Into, - { - let browse_name = stringify!($event); - let display_name = stringify!($event); - Self { - base: AuditCertificateEventType::new( - node_id, - Self::event_type_id(), - browse_name, - display_name, - time, - ), - } - } - - pub fn certificate(mut self, certificate: ByteString) -> Self { - self.base = self.base.certificate(certificate); - self - } - } - }; -} - -audit_certificate_event_impl!(AuditCertificateDataMismatchEventType); -audit_certificate_event_impl!(AuditCertificateExpiredEventType); -audit_certificate_event_impl!(AuditCertificateInvalidEventType); -audit_certificate_event_impl!(AuditCertificateUntrustedEventType); 
-audit_certificate_event_impl!(AuditCertificateRevokedEventType); -audit_certificate_event_impl!(AuditCertificateMismatchEventType); diff --git a/lib/src/server/events/audit/event.rs b/lib/src/server/events/audit/event.rs deleted file mode 100644 index 5c0a1e332..000000000 --- a/lib/src/server/events/audit/event.rs +++ /dev/null @@ -1,215 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::types::*; - -use crate::server::{ - address_space::address_space::AddressSpace, - events::event::{BaseEventType, Event}, -}; - -use super::AuditEvent; - -/// Base type for audit events. Do not raise events of this type -pub(super) struct AuditEventType { - base: BaseEventType, - action_time_stamp: UtcTime, - status: bool, - server_id: UAString, - client_audit_entry_id: UAString, - client_user_id: UAString, -} - -impl AuditEvent for AuditEventType { - fn event_type_id() -> NodeId { - panic!(); - } - - fn log_message(&self) -> String { - // Dump out comma-separated key=value pairs in the order they were populated - self.base - .properties() - .iter() - .map(|(k, v)| format!("{}={}", k, v)) - .collect::>() - .join(",") - } -} - -impl Event for AuditEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - if self.is_valid() { - let node_id = self.base.raise(address_space)?; - let ns = node_id.namespace; - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ActionTimeStamp", - "ActionTimeStamp", - DataTypeId::UtcTime, - self.action_time_stamp, - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "Status", - "Status", - DataTypeId::Boolean, - self.status, - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ServerId", - "ServerId", - DataTypeId::String, - self.server_id.clone(), - address_space, - ); - self.add_property( - &node_id, - 
NodeId::next_numeric(ns), - "ClientAuditEntryId", - "ClientAuditEntryId", - DataTypeId::String, - self.client_audit_entry_id.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ClientUserId", - "ClientUserId", - DataTypeId::String, - self.client_user_id.clone(), - address_space, - ); - Ok(node_id) - } else { - error!("AuditEventType is invalid and will not be inserted"); - Err(()) - } - } -} - -base_event_impl!(AuditEventType, base); - -impl AuditEventType { - pub fn new( - node_id: R, - event_type_id: E, - browse_name: S, - display_name: T, - time: DateTime, - ) -> Self - where - R: Into, - E: Into, - S: Into, - T: Into, - { - let action_time_stamp = DateTime::now(); - let server_id = UAString::null(); - let parent_node = Self::parent_node(); - Self { - base: BaseEventType::new( - node_id, - event_type_id, - browse_name, - display_name, - parent_node, - time, - ), - status: false, - action_time_stamp, - server_id, - client_audit_entry_id: UAString::null(), - client_user_id: UAString::null(), - } - } - - pub fn client_audit_entry_id(mut self, client_audit_entry_id: T) -> Self - where - T: Into, - { - self.client_audit_entry_id = client_audit_entry_id.into(); - self - } - - pub fn client_user_id(mut self, client_user_id: T) -> Self - where - T: Into, - { - self.client_user_id = client_user_id.into(); - self - } - - pub fn status(mut self, status: bool) -> Self { - self.status = status; - self - } - - pub fn server_id(mut self, server_id: T) -> Self - where - T: Into, - { - self.server_id = server_id.into(); - self - } - - pub fn action_time_stamp(mut self, action_time_stamp: UtcTime) -> Self { - self.action_time_stamp = action_time_stamp; - self - } -} - -macro_rules! 
audit_event_impl { - ( $event:ident, $base:ident ) => { - base_event_impl!($event, $base); - - impl $event { - pub fn client_audit_entry_id(mut self, client_audit_entry_id: T) -> Self - where - T: Into, - { - self.$base = self.$base.client_audit_entry_id(client_audit_entry_id); - self - } - - pub fn client_user_id(mut self, client_user_id: T) -> Self - where - T: Into, - { - self.$base = self.$base.client_user_id(client_user_id); - self - } - - pub fn status(mut self, status: bool) -> Self { - self.$base = self.$base.status(status); - self - } - - pub fn server_id(mut self, server_id: T) -> Self - where - T: Into, - { - self.$base = self.$base.server_id(server_id); - self - } - - pub fn action_time_stamp(mut self, action_time_stamp: UtcTime) -> Self { - self.$base = self.$base.action_time_stamp(action_time_stamp); - self - } - } - }; -} diff --git a/lib/src/server/events/audit/mod.rs b/lib/src/server/events/audit/mod.rs deleted file mode 100644 index 3585bc28d..000000000 --- a/lib/src/server/events/audit/mod.rs +++ /dev/null @@ -1,67 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! This module implements the audit event types. -//! -//! Note: Due to Rust's lack of inheritance, these types use aggregation and helper macros to expose -//! builder functions on each type in the hierarchy. They're not optimal at all (impls call base impls call base impls in some cases), -//! but they should suffice for the purpose they'll be used for. - -use std::sync::Arc; - -use crate::sync::*; -use crate::types::*; - -use crate::server::{address_space::address_space::AddressSpace, events::event::Event}; - -pub trait AuditEvent: Event { - fn parent_node() -> NodeId { - // TODO Where do audit nodes get put in the address_space? - NodeId::null() - } - - /// Returns the kind of event type that this audit event represents. Abstract events should - /// panic. 
- fn event_type_id() -> NodeId; - - fn log_message(&self) -> String; -} - -#[macro_use] -pub mod event; -#[macro_use] -pub mod security_event; -#[macro_use] -pub mod session_events; -#[macro_use] -pub mod certificate_events; -pub mod cancel_event; -pub mod node_management_event; - -/// The audit log will be responsible for adding audit events to the address space, and potentially logging them -/// to file. All audit events should be raised through `AuditLog` to support any future logging capability. -pub(crate) struct AuditLog { - address_space: Arc>, -} - -impl AuditLog { - pub fn new(address_space: Arc>) -> AuditLog { - AuditLog { address_space } - } - - pub fn raise_and_log(&self, mut event: T) -> Result - where - T: AuditEvent + Event, - { - let mut address_space = trace_write_lock!(self.address_space); - let result = event.raise(&mut address_space).map_err(|_| ()); - if result.is_err() { - error!("Cannot raise an audit event, check audit event entry below to see if there are reasons for this"); - } - // At this point audit events just go out as log events but smarter logging implementations can always hive these - // events off to a separate file. Look at demo-server for an example of this. 
- info!("Audit Event: {}", event.log_message()); - result - } -} diff --git a/lib/src/server/events/audit/node_management_event.rs b/lib/src/server/events/audit/node_management_event.rs deleted file mode 100644 index eefec02f2..000000000 --- a/lib/src/server/events/audit/node_management_event.rs +++ /dev/null @@ -1,37 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::types::*; - -use crate::server::{address_space::address_space::AddressSpace, events::event::Event}; - -use super::{event::AuditEventType, AuditEvent}; - -pub struct AuditNodeManagementEventType { - base: AuditEventType, -} - -impl Event for AuditNodeManagementEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - self.base.raise(address_space) - } -} - -impl AuditEvent for AuditNodeManagementEventType { - fn event_type_id() -> NodeId { - ObjectTypeId::AuditNodeManagementEventType.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - -audit_event_impl!(AuditNodeManagementEventType, base); diff --git a/lib/src/server/events/audit/security_event.rs b/lib/src/server/events/audit/security_event.rs deleted file mode 100644 index b4ad0895b..000000000 --- a/lib/src/server/events/audit/security_event.rs +++ /dev/null @@ -1,64 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::types::*; - -use crate::server::{address_space::address_space::AddressSpace, events::event::Event}; - -use super::{event::AuditEventType, AuditEvent}; - -/// Base type for audit security events. 
Do not raise events of this type -pub(super) struct AuditSecurityEventType { - base: AuditEventType, -} - -impl AuditEvent for AuditSecurityEventType { - fn event_type_id() -> NodeId { - panic!() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - -impl Event for AuditSecurityEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - self.base.raise(address_space) - } -} - -audit_event_impl!(AuditSecurityEventType, base); - -impl AuditSecurityEventType { - pub fn new( - node_id: R, - event_type_id: E, - browse_name: S, - display_name: T, - time: DateTime, - ) -> Self - where - R: Into, - E: Into, - S: Into, - T: Into, - { - Self { - base: AuditEventType::new(node_id, event_type_id, browse_name, display_name, time), - } - } -} - -macro_rules! audit_security_event_impl { - ( $event:ident, $base:ident ) => { - audit_event_impl!($event, $base); - }; -} diff --git a/lib/src/server/events/audit/session_events.rs b/lib/src/server/events/audit/session_events.rs deleted file mode 100644 index 60f4ae73b..000000000 --- a/lib/src/server/events/audit/session_events.rs +++ /dev/null @@ -1,373 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::crypto::X509; -use crate::types::*; - -use crate::server::{address_space::address_space::AddressSpace, events::event::Event}; - -use super::{security_event::AuditSecurityEventType, AuditEvent}; - -/// Base type for audit session events. 
Do not raise events of this type -pub struct AuditSessionEventType { - base: AuditSecurityEventType, - session_id: NodeId, -} - -pub enum AuditCloseSessionReason { - CloseSession, - Timeout, - Terminated, -} - -impl AuditCloseSessionReason { - pub fn source_name(&self) -> String { - match self { - AuditCloseSessionReason::CloseSession => "Session/CloseSession", - AuditCloseSessionReason::Timeout => "Session/Timeout", - AuditCloseSessionReason::Terminated => "Session/Terminated", - } - .into() - } -} - -impl AuditEvent for AuditSessionEventType { - fn event_type_id() -> NodeId { - ObjectTypeId::AuditSessionEventType.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - -impl Event for AuditSessionEventType { - type Err = (); - - fn is_valid(&self) -> bool { - !self.session_id.is_null() && self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - let node_id = self.base.raise(address_space)?; - let ns = node_id.namespace; - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "SessionId", - "SessionId", - DataTypeId::NodeId, - self.session_id.clone(), - address_space, - ); - Ok(node_id) - } -} - -audit_security_event_impl!(AuditSessionEventType, base); - -impl AuditSessionEventType { - pub fn new( - node_id: R, - event_type_id: E, - browse_name: S, - display_name: T, - time: DateTime, - ) -> Self - where - R: Into, - E: Into, - S: Into, - T: Into, - { - Self { - base: AuditSecurityEventType::new( - node_id, - event_type_id, - browse_name, - display_name, - time, - ), - session_id: NodeId::null(), - } - } - - pub fn new_close_session(node_id: R, time: DateTime, reason: AuditCloseSessionReason) -> Self - where - R: Into, - { - Self::new( - node_id, - Self::event_type_id(), - "AuditSessionEventType", - "AuditSessionEventType", - time, - ) - .source_name(reason.source_name()) - } - - pub fn session_id(mut self, session_id: T) -> Self - where - T: Into, - { - self.session_id = session_id.into(); 
- self - } -} - -macro_rules! audit_session_event_impl { - ( $event:ident, $base:ident ) => { - audit_security_event_impl!($event, $base); - - impl $event { - pub fn session_id(mut self, session_id: T) -> $event - where - T: Into, - { - self.$base = self.$base.session_id(session_id); - self - } - } - }; -} - -//////////////////////////////////////////////////////////////////////////////////////////////////// - -pub struct AuditCreateSessionEventType { - base: AuditSessionEventType, - secure_channel_id: UAString, - client_certificate: ByteString, - client_certificate_thumbprint: UAString, - revised_session_timeout: Duration, -} - -impl AuditEvent for AuditCreateSessionEventType { - fn event_type_id() -> NodeId { - ObjectTypeId::AuditCreateSessionEventType.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - -impl Event for AuditCreateSessionEventType { - type Err = (); - - fn is_valid(&self) -> bool { - !self.secure_channel_id.is_null() - && !self.client_certificate.is_null() - && !self.client_certificate_thumbprint.is_null() - && self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - let node_id = self.base.raise(address_space)?; - let ns = node_id.namespace; - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "SecureChannelId", - "SecureChannelId", - DataTypeId::String, - self.secure_channel_id.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ClientCertificate", - "ClientCertificate", - DataTypeId::ByteString, - self.client_certificate.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ClientCertificateThumbprint", - "ClientCertificateThumbprint", - DataTypeId::String, - self.client_certificate_thumbprint.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "RevisedSessionTimeout", - "RevisedSessionTimeout", - DataTypeId::Duration, - 
self.revised_session_timeout, - address_space, - ); - Ok(node_id) - } -} - -audit_session_event_impl!(AuditCreateSessionEventType, base); - -impl AuditCreateSessionEventType { - pub fn new(node_id: R, time: DateTime) -> Self - where - R: Into, - { - let event_type_id = ObjectTypeId::AuditCreateSessionEventType; - Self { - base: AuditSessionEventType::new( - node_id, - event_type_id, - "AuditCreateSessionEventType", - "AuditCreateSessionEventType", - time, - ), - secure_channel_id: UAString::null(), - client_certificate: ByteString::null(), - client_certificate_thumbprint: UAString::null(), - revised_session_timeout: 0.0, - } - } - - pub fn secure_channel_id(mut self, secure_channel_id: T) -> Self - where - T: Into, - { - self.secure_channel_id = secure_channel_id.into(); - self - } - - pub fn client_certificate(mut self, client_certificate: &X509) -> Self { - self.client_certificate = client_certificate.as_byte_string(); - self.client_certificate_thumbprint = client_certificate.thumbprint().as_hex_string().into(); - self - } - - pub fn revised_session_timeout(mut self, revised_session_timeout: Duration) -> Self { - self.revised_session_timeout = revised_session_timeout; - self - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////// - -pub struct AuditActivateSessionEventType { - base: AuditSessionEventType, - client_software_certificates: Vec, - user_identity_token: UserIdentityToken, - secure_channel_id: UAString, -} - -impl AuditEvent for AuditActivateSessionEventType { - fn event_type_id() -> NodeId { - ObjectTypeId::AuditActivateSessionEventType.into() - } - - fn log_message(&self) -> String { - self.base.log_message() - } -} - -impl Event for AuditActivateSessionEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - let node_id = self.base.raise(address_space)?; - let ns = node_id.namespace; - // 
Client software certificates is an array of extension objects (extension object i=344) - let client_software_certificates = self - .client_software_certificates - .iter() - .map(|c| { - Variant::from(ExtensionObject::from_encodable( - ObjectId::SignedSoftwareCertificate_Encoding_DefaultBinary, - c, - )) - }) - .collect::>(); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ClientSoftwareCertificates", - "ClientSoftwareCertificates", - DataTypeId::SignedSoftwareCertificate, - (VariantTypeId::ExtensionObject, client_software_certificates), - address_space, - ); - - // User identity token (extension object i=316) - let user_identity_token = ExtensionObject::from_encodable( - ObjectId::UserIdentityToken_Encoding_DefaultBinary, - &self.user_identity_token, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "UserIdentityToken", - "UserIdentityToken", - DataTypeId::UserIdentityToken, - user_identity_token, - address_space, - ); - - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "SecureChannelId", - "SecureChannelId", - DataTypeId::String, - self.secure_channel_id.clone(), - address_space, - ); - Ok(node_id) - } -} - -audit_session_event_impl!(AuditActivateSessionEventType, base); - -impl AuditActivateSessionEventType { - pub fn new(node_id: R, time: DateTime) -> Self - where - R: Into, - { - let event_type_id = ObjectTypeId::AuditCreateSessionEventType; - Self { - base: AuditSessionEventType::new( - node_id, - event_type_id, - "AuditCreateSessionEventType", - "AuditCreateSessionEventType", - time, - ), - client_software_certificates: Vec::new(), - user_identity_token: UserIdentityToken { - policy_id: UAString::null(), - }, - secure_channel_id: UAString::null(), - } - } - - pub fn client_software_certificates( - mut self, - client_software_certificates: Vec, - ) -> Self { - self.client_software_certificates = client_software_certificates; - self - } - - pub fn user_identity_token(mut self, user_identity_token: 
UserIdentityToken) -> Self { - self.user_identity_token = user_identity_token; - self - } - - pub fn secure_channel_id(mut self, secure_channel_id: T) -> Self - where - T: Into, - { - self.secure_channel_id = secure_channel_id.into(); - self - } -} diff --git a/lib/src/server/events/evaluate.rs b/lib/src/server/events/evaluate.rs new file mode 100644 index 000000000..93337e6f0 --- /dev/null +++ b/lib/src/server/events/evaluate.rs @@ -0,0 +1,986 @@ +use std::cmp::Ordering; + +use regex::Regex; + +use crate::types::{ + AttributeId, EventFieldList, FilterOperator, NodeId, NumericRange, QualifiedName, Variant, + VariantTypeId, +}; + +use super::{ + event::Event, + validation::{ + ParsedContentFilter, ParsedEventFilter, ParsedOperand, ParsedSimpleAttributeOperand, + }, +}; + +impl ParsedEventFilter { + pub fn evaluate(&self, event: &dyn Event, client_handle: u32) -> Option { + if !self.content_filter.evaluate(event) { + return None; + } + + let fields: Vec<_> = self + .select_clauses + .iter() + .map(|c| get_field(event, c)) + .collect(); + Some(EventFieldList { + client_handle, + event_fields: Some(fields), + }) + } +} + +macro_rules! cmp_op { + ($slf:ident, $evt:ident, $op:ident, $pt:pat) => { + matches!( + ParsedContentFilter::compare_op( + $slf.evaluate_operand($evt, &$op.operands[0]), + $slf.evaluate_operand($evt, &$op.operands[1]), + ), + $pt + ) + .into() + }; +} + +macro_rules! as_type { + ($v:expr, $t:ident, $def:expr) => {{ + let v = $v.convert(VariantTypeId::$t); + let Variant::$t(v) = v else { + return $def; + }; + v + }}; +} + +macro_rules! 
bw_op { + ($lhs:expr, $rhs:expr, $op:expr) => {{ + match $op { + BitOperation::And => ($lhs & $rhs).into(), + BitOperation::Or => ($lhs | $rhs).into(), + } + }}; +} + +pub trait AttributeQueryable: Copy { + fn get_attribute( + &self, + type_definition_id: &NodeId, + browse_path: &[QualifiedName], + attribute_id: AttributeId, + index_range: NumericRange, + ) -> Variant; +} + +impl AttributeQueryable for &dyn Event { + fn get_attribute( + &self, + type_definition_id: &NodeId, + browse_path: &[QualifiedName], + attribute_id: AttributeId, + index_range: NumericRange, + ) -> Variant { + self.get_field(type_definition_id, browse_path, attribute_id, index_range) + } +} + +enum BitOperation { + And, + Or, +} + +impl ParsedContentFilter { + pub fn evaluate(&self, event: impl AttributeQueryable) -> bool { + if self.elements.is_empty() { + return true; + } + matches!(self.evulate_element(event, 0), Variant::Boolean(true)) + } + + fn evulate_element(&self, event: impl AttributeQueryable, index: usize) -> Variant { + let Some(op) = self.elements.get(index) else { + return Variant::Empty; + }; + + match op.operator { + FilterOperator::Equals => cmp_op!(self, event, op, Some(Ordering::Equal)), + FilterOperator::IsNull => { + (self.evaluate_operand(event, &op.operands[0]) == Variant::Empty).into() + } + FilterOperator::GreaterThan => cmp_op!(self, event, op, Some(Ordering::Greater)), + FilterOperator::LessThan => cmp_op!(self, event, op, Some(Ordering::Less)), + FilterOperator::GreaterThanOrEqual => { + cmp_op!(self, event, op, Some(Ordering::Equal | Ordering::Greater)) + } + FilterOperator::LessThanOrEqual => { + cmp_op!(self, event, op, Some(Ordering::Equal | Ordering::Less)) + } + FilterOperator::Like => Self::like( + self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + ) + .into(), + FilterOperator::Not => Self::not(self.evaluate_operand(event, &op.operands[0])), + FilterOperator::Between => Self::between( + 
self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + self.evaluate_operand(event, &op.operands[2]), + ) + .into(), + FilterOperator::InList => Self::in_list( + self.evaluate_operand(event, &op.operands[0]), + op.operands + .iter() + .skip(1) + .map(|o| self.evaluate_operand(event, o)), + ) + .into(), + FilterOperator::And => Self::and( + self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + ), + FilterOperator::Or => Self::or( + self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + ), + FilterOperator::Cast => Self::cast( + self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + ), + FilterOperator::BitwiseAnd => Self::bitwise_op( + self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + BitOperation::And, + ), + FilterOperator::BitwiseOr => Self::bitwise_op( + self.evaluate_operand(event, &op.operands[0]), + self.evaluate_operand(event, &op.operands[1]), + BitOperation::Or, + ), + _ => Variant::Empty, + } + } + + fn evaluate_operand(&self, event: impl AttributeQueryable, op: &ParsedOperand) -> Variant { + match op { + ParsedOperand::ElementOperand(o) => self.evulate_element(event, o.index as usize), + ParsedOperand::LiteralOperand(o) => o.value.clone(), + ParsedOperand::AttributeOperand(_) => unreachable!(), + ParsedOperand::SimpleAttributeOperand(o) => event.get_attribute( + &o.type_definition_id, + &o.browse_path, + o.attribute_id, + o.index_range.clone(), + ), + } + } + + fn in_list(lhs: Variant, rhs: impl Iterator) -> bool { + for it in rhs { + if matches!(Self::compare_op(lhs.clone(), it), Some(Ordering::Equal)) { + return true; + } + } + false + } + + fn between(it: Variant, gte: Variant, lte: Variant) -> bool { + matches!( + Self::compare_op(it.clone(), gte), + Some(Ordering::Greater | Ordering::Equal) + ) && matches!( + Self::compare_op(it, 
lte), + Some(Ordering::Less | Ordering::Equal) + ) + } + + fn not(rhs: Variant) -> Variant { + let rhs = as_type!(rhs, Boolean, Variant::Empty); + (!rhs).into() + } + + fn and(lhs: Variant, rhs: Variant) -> Variant { + let lhs = as_type!(lhs, Boolean, Variant::Empty); + let rhs = as_type!(rhs, Boolean, Variant::Empty); + + (lhs && rhs).into() + } + + fn or(lhs: Variant, rhs: Variant) -> Variant { + let lhs = as_type!(lhs, Boolean, Variant::Empty); + let rhs = as_type!(rhs, Boolean, Variant::Empty); + + (lhs || rhs).into() + } + + fn like(lhs: Variant, rhs: Variant) -> bool { + let lhs = as_type!(lhs, String, false); + let rhs = as_type!(rhs, String, false); + let Ok(re) = like_to_regex(rhs.as_ref()) else { + return false; + }; + re.is_match(lhs.as_ref()) + } + + fn cast(lhs: Variant, rhs: Variant) -> Variant { + let type_id = match rhs { + Variant::NodeId(n) => { + let Ok(t) = VariantTypeId::try_from(&*n) else { + return Variant::Empty; + }; + t + } + Variant::ExpandedNodeId(n) => { + let Ok(t) = VariantTypeId::try_from(&n.node_id) else { + return Variant::Empty; + }; + t + } + _ => return Variant::Empty, + }; + lhs.cast(type_id) + } + + fn convert(lhs: Variant, rhs: Variant) -> (Variant, Variant) { + let lhs_type = lhs.type_id(); + match lhs_type.precedence().cmp(&rhs.type_id().precedence()) { + std::cmp::Ordering::Less => (lhs, rhs.convert(lhs_type)), + std::cmp::Ordering::Equal => (lhs, rhs), + std::cmp::Ordering::Greater => (lhs.convert(rhs.type_id()), rhs), + } + } + + fn bitwise_op(lhs: Variant, rhs: Variant, op: BitOperation) -> Variant { + let (lhs, rhs) = Self::convert(lhs, rhs); + + match (lhs, rhs) { + (Variant::SByte(lhs), Variant::SByte(rhs)) => bw_op!(lhs, rhs, op), + (Variant::Byte(lhs), Variant::Byte(rhs)) => bw_op!(lhs, rhs, op), + (Variant::Int16(lhs), Variant::Int16(rhs)) => bw_op!(lhs, rhs, op), + (Variant::Int32(lhs), Variant::Int32(rhs)) => bw_op!(lhs, rhs, op), + (Variant::Int64(lhs), Variant::Int64(rhs)) => bw_op!(lhs, rhs, op), + 
(Variant::UInt16(lhs), Variant::UInt16(rhs)) => bw_op!(lhs, rhs, op), + (Variant::UInt32(lhs), Variant::UInt32(rhs)) => bw_op!(lhs, rhs, op), + (Variant::UInt64(lhs), Variant::UInt64(rhs)) => bw_op!(lhs, rhs, op), + _ => Variant::Empty, + } + } + + fn compare_op(lhs: Variant, rhs: Variant) -> Option { + let (lhs, rhs) = Self::convert(lhs, rhs); + match (lhs, rhs) { + (Variant::SByte(lhs), Variant::SByte(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::Byte(lhs), Variant::Byte(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::Int16(lhs), Variant::Int16(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::Int32(lhs), Variant::Int32(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::Int64(lhs), Variant::Int64(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::UInt16(lhs), Variant::UInt16(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::UInt32(lhs), Variant::UInt32(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::UInt64(lhs), Variant::UInt64(rhs)) => Some(lhs.cmp(&rhs)), + (Variant::Double(lhs), Variant::Double(rhs)) => Some(lhs.total_cmp(&rhs)), + (Variant::Float(lhs), Variant::Float(rhs)) => Some(lhs.total_cmp(&rhs)), + (Variant::Boolean(lhs), Variant::Boolean(rhs)) => Some(lhs.cmp(&rhs)), + _ => None, + } + } +} + +fn get_field(event: &dyn Event, attr: &ParsedSimpleAttributeOperand) -> Variant { + event.get_field( + &attr.type_definition_id, + &attr.browse_path, + attr.attribute_id, + attr.index_range.clone(), + ) +} + +/// Converts the OPC UA SQL-esque Like format into a regular expression. +fn like_to_regex(v: &str) -> Result { + // Give a reasonable buffer + let mut pattern = String::with_capacity(v.len() * 2); + + let mut in_list = false; + + // Turn the chars into a vec to make it easier to index them + let v = v.chars().collect::>(); + + pattern.push('^'); + v.iter().enumerate().for_each(|(i, c)| { + if in_list { + if *c == ']' && (i == 0 || v[i - 1] != '\\') { + // Close the list + in_list = false; + pattern.push(*c); + } else { + // Chars in list are escaped if required + match c { + '$' | '(' | ')' | '.' 
| '+' | '*' | '?' => { + // Other regex chars except for ^ are escaped + pattern.push('\\'); + pattern.push(*c); + } + _ => { + // Everything between two [] will be treated as-is + pattern.push(*c); + } + } + } + } else { + match c { + '$' | '^' | '(' | ')' | '.' | '+' | '*' | '?' => { + // Other regex chars are escaped + pattern.push('\\'); + pattern.push(*c); + } + '[' => { + // Opens a list of chars to match + if i == 0 || v[i - 1] != '\\' { + // Open the list + in_list = true; + } + pattern.push(*c); + } + '%' => { + if i == 0 || v[i - 1] != '\\' { + // A % is a match on zero or more chans unless it is escaped + pattern.push_str(".*"); + } else { + pattern.push(*c); + } + } + '_' => { + if i == 0 || v[i - 1] != '\\' { + // A _ is a match on a single char unless it is escaped + pattern.push('?'); + } else { + // Remove escaping of the underscore + let _ = pattern.pop(); + pattern.push(*c); + } + } + _ => { + pattern.push(*c); + } + } + } + }); + pattern.push('$'); + Regex::new(&pattern).map_err(|err| { + error!("Problem parsing, error = {}", err); + }) +} + +#[cfg(test)] +mod tests { + use regex::Regex; + + use crate::{ + server::address_space::types::AddressSpace, + server::{ + address_space::{ObjectTypeBuilder, VariableBuilder}, + events::evaluate::like_to_regex, + node_manager::TypeTree, + BaseEventType, Event, ParsedContentFilter, + }, + types::{ + AttributeId, ByteString, ContentFilter, ContentFilterElement, DataTypeId, DateTime, + FilterOperator, LocalizedText, NodeId, ObjectId, ObjectTypeId, Operand, UAString, + VariableTypeId, Variant, + }, + }; + + fn compare_regex(r1: Regex, r2: Regex) { + assert_eq!(r1.as_str(), r2.as_str()); + } + + #[test] + fn like_to_regex_tests() { + compare_regex(like_to_regex("").unwrap(), Regex::new("^$").unwrap()); + compare_regex(like_to_regex("^$").unwrap(), Regex::new(r"^\^\$$").unwrap()); + compare_regex(like_to_regex("%").unwrap(), Regex::new("^.*$").unwrap()); + compare_regex(like_to_regex("[%]").unwrap(), 
Regex::new("^[%]$").unwrap()); + compare_regex(like_to_regex("[_]").unwrap(), Regex::new("^[_]$").unwrap()); + compare_regex( + like_to_regex(r"[\]]").unwrap(), + Regex::new(r"^[\]]$").unwrap(), + ); + compare_regex( + like_to_regex("[$().+*?]").unwrap(), + Regex::new(r"^[\$\(\)\.\+\*\?]$").unwrap(), + ); + compare_regex(like_to_regex("_").unwrap(), Regex::new("^?$").unwrap()); + compare_regex( + like_to_regex("[a-z]").unwrap(), + Regex::new("^[a-z]$").unwrap(), + ); + compare_regex( + like_to_regex("[abc]").unwrap(), + Regex::new("^[abc]$").unwrap(), + ); + compare_regex( + like_to_regex(r"\[\]").unwrap(), + Regex::new(r"^\[\]$").unwrap(), + ); + compare_regex( + like_to_regex("[^0-9]").unwrap(), + Regex::new("^[^0-9]$").unwrap(), + ); + + // Some samples from OPC UA part 4 + let re = like_to_regex("Th[ia][ts]%").unwrap(); + assert!(re.is_match("That is fine")); + assert!(re.is_match("This is fine")); + assert!(re.is_match("That as one")); + assert!(!re.is_match("Then at any")); // Spec says this should pass when it obviously wouldn't + + let re = like_to_regex("%en%").unwrap(); + assert!(re.is_match("entail")); + assert!(re.is_match("green")); + assert!(re.is_match("content")); + + let re = like_to_regex("abc[13-68]").unwrap(); + assert!(re.is_match("abc1")); + assert!(!re.is_match("abc2")); + assert!(re.is_match("abc3")); + assert!(re.is_match("abc4")); + assert!(re.is_match("abc5")); + assert!(re.is_match("abc6")); + assert!(!re.is_match("abc7")); + assert!(re.is_match("abc8")); + + let re = like_to_regex("ABC[^13-5]").unwrap(); + assert!(!re.is_match("ABC1")); + assert!(re.is_match("ABC2")); + assert!(!re.is_match("ABC3")); + assert!(!re.is_match("ABC4")); + assert!(!re.is_match("ABC5")); + } + + struct TestEvent { + base: BaseEventType, + field: i32, + } + + impl TestEvent { + pub fn new( + type_id: impl Into, + event_id: ByteString, + message: impl Into, + time: DateTime, + field: i32, + ) -> Self { + Self { + base: BaseEventType::new(type_id, event_id, 
message, time), + field, + } + } + } + + impl Event for TestEvent { + fn get_field( + &self, + type_definition_id: &crate::types::NodeId, + browse_path: &[crate::types::QualifiedName], + attribute_id: crate::types::AttributeId, + index_range: crate::types::NumericRange, + ) -> crate::types::Variant { + if !self.matches_type_id(type_definition_id) + || browse_path.len() != 1 + || attribute_id != AttributeId::Value + { + return Variant::Empty; + } + let field = &browse_path[0]; + if field.namespace_index != 0 { + return Variant::Empty; + } + + match field.name.as_ref() { + "Field" => take_value!(self.field, index_range), + _ => { + self.base + .get_field(type_definition_id, browse_path, attribute_id, index_range) + } + } + } + + fn time(&self) -> &crate::types::DateTime { + self.base.time() + } + + fn matches_type_id(&self, id: &NodeId) -> bool { + id == &NodeId::new(1, 123) || self.base.matches_type_id(id) + } + } + + fn type_tree() -> TypeTree { + let mut address_space = AddressSpace::new(); + address_space.add_namespace("http://opcfoundation.org/UA/", 0); + address_space.add_namespace("my:namespace:uri", 1); + crate::server::address_space::populate_address_space(&mut address_space); + + let event_type_id = NodeId::new(1, 123); + ObjectTypeBuilder::new(&event_type_id, "TestEventType", "TestEventType") + .is_abstract(false) + .subtype_of(ObjectTypeId::BaseEventType) + .insert(&mut address_space); + + VariableBuilder::new(&NodeId::new(1, "field"), "Field", "Field") + .property_of(&event_type_id) + .data_type(DataTypeId::UInt32) + .has_type_definition(VariableTypeId::PropertyType) + .has_modelling_rule(ObjectId::ModellingRule_Mandatory) + .insert(&mut address_space); + + let mut type_tree = TypeTree::new(); + address_space.load_into_type_tree(&mut type_tree); + + type_tree + } + + fn filter(elements: Vec, type_tree: &TypeTree) -> ParsedContentFilter { + let (_, f) = ParsedContentFilter::parse( + ContentFilter { + elements: Some(elements), + }, + type_tree, + false, + 
false, + ); + f.unwrap() + } + + fn filter_elem(operands: &[Operand], op: FilterOperator) -> ContentFilterElement { + ContentFilterElement { + filter_operator: op, + filter_operands: Some(operands.iter().map(|o| o.into()).collect()), + } + } + + fn event(field: i32) -> TestEvent { + TestEvent::new( + NodeId::new(1, 123), + ByteString::null(), + "message", + DateTime::now(), + field, + ) + } + + #[test] + fn test_equality_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(10), Operand::literal(9)], + FilterOperator::Equals, + )], + &type_tree, + ); + let event = event(2); + assert!(!f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(2), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::Equals, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + } + + #[test] + fn test_lt_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(10), Operand::literal(9)], + FilterOperator::LessThan, + )], + &type_tree, + ); + let event = event(2); + assert!(!f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(1), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::LessThan, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(2), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::LessThan, + )], + &type_tree, + ); + assert!(!f.evaluate(&event as &dyn Event)); + } + + #[test] + fn test_lte_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(10), Operand::literal(9)], + 
FilterOperator::LessThanOrEqual, + )], + &type_tree, + ); + let event = event(2); + assert!(!f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(1), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::LessThanOrEqual, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(2), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::LessThanOrEqual, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + } + + #[test] + fn test_gt_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(10), Operand::literal(9)], + FilterOperator::GreaterThan, + )], + &type_tree, + ); + let event = event(2); + assert!(f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(3), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::GreaterThan, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(2), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::GreaterThan, + )], + &type_tree, + ); + assert!(!f.evaluate(&event as &dyn Event)); + } + + #[test] + fn test_gte_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(10), Operand::literal(9)], + FilterOperator::GreaterThanOrEqual, + )], + &type_tree, + ); + let event = event(2); + assert!(f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(3), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + 
"Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::GreaterThanOrEqual, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(2), + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + ], + FilterOperator::GreaterThanOrEqual, + )], + &type_tree, + ); + assert!(f.evaluate(&event as &dyn Event)); + } + + #[test] + fn test_not_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem(&[Operand::literal(false)], FilterOperator::Not)], + &type_tree, + ); + let evt = event(2); + assert!(f.evaluate(&evt as &dyn Event)); + + let f = filter( + vec![ + filter_elem(&[Operand::element(1)], FilterOperator::Not), + filter_elem( + &[ + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + Operand::literal(3), + ], + FilterOperator::Equals, + ), + ], + &type_tree, + ); + assert!(f.evaluate(&evt as &dyn Event)); + let evt = event(3); + assert!(!f.evaluate(&evt as &dyn Event)); + } + + #[test] + fn test_between_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(9), + Operand::literal(8), + Operand::literal(10), + ], + FilterOperator::Between, + )], + &type_tree, + ); + let evt = event(2); + assert!(f.evaluate(&evt as &dyn Event)); + let f = filter( + vec![filter_elem( + &[ + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + Operand::literal(8), + Operand::literal(10), + ], + FilterOperator::Between, + )], + &type_tree, + ); + assert!(!f.evaluate(&evt as &dyn Event)); + let evt = event(9); + assert!(f.evaluate(&evt as &dyn Event)); + let evt = event(10); + assert!(f.evaluate(&evt as &dyn Event)); + let evt = event(8); + assert!(f.evaluate(&evt as &dyn Event)); + let evt = event(11); + assert!(!f.evaluate(&evt as 
&dyn Event)); + } + + #[test] + fn test_and_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(true), Operand::literal(false)], + FilterOperator::And, + )], + &type_tree, + ); + let evt = event(2); + assert!(!f.evaluate(&evt as &dyn Event)); + let f = filter( + vec![ + filter_elem( + &[Operand::element(1), Operand::element(2)], + FilterOperator::And, + ), + filter_elem( + &[ + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + Operand::literal(3), + ], + FilterOperator::Equals, + ), + filter_elem( + &[Operand::literal(3), Operand::literal(3)], + FilterOperator::Equals, + ), + ], + &type_tree, + ); + + assert!(!f.evaluate(&evt as &dyn Event)); + let evt = event(3); + assert!(f.evaluate(&evt as &dyn Event)); + } + + #[test] + fn test_or_filter() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[Operand::literal(true), Operand::literal(false)], + FilterOperator::Or, + )], + &type_tree, + ); + let evt = event(2); + assert!(f.evaluate(&evt as &dyn Event)); + let f = filter( + vec![ + filter_elem( + &[Operand::element(1), Operand::element(2)], + FilterOperator::Or, + ), + filter_elem( + &[ + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + Operand::literal(3), + ], + FilterOperator::Equals, + ), + filter_elem( + &[Operand::literal(3), Operand::literal(2)], + FilterOperator::Equals, + ), + ], + &type_tree, + ); + + assert!(!f.evaluate(&evt as &dyn Event)); + let evt = event(3); + assert!(f.evaluate(&evt as &dyn Event)); + } + + #[test] + fn test_in_list() { + let type_tree = type_tree(); + let f = filter( + vec![filter_elem( + &[ + Operand::literal(1), + Operand::literal(2), + Operand::literal(3), + Operand::literal(1), + ], + FilterOperator::InList, + )], + &type_tree, + ); + let evt = event(2); + assert!(f.evaluate(&evt as &dyn Event)); + let f = filter( + 
vec![filter_elem( + &[ + Operand::simple_attribute( + ObjectTypeId::BaseEventType, + "Field", + AttributeId::Value, + UAString::null(), + ), + Operand::literal(1), + Operand::literal(2), + Operand::literal(3), + ], + FilterOperator::InList, + )], + &type_tree, + ); + assert!(f.evaluate(&evt as &dyn Event)); + let evt = event(4); + assert!(!f.evaluate(&evt as &dyn Event)); + } +} diff --git a/lib/src/server/events/event.rs b/lib/src/server/events/event.rs index 8b424d4b1..5d0e9018f 100644 --- a/lib/src/server/events/event.rs +++ b/lib/src/server/events/event.rs @@ -1,61 +1,46 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Contains functions for generating events and adding them to the address space of the server. use crate::types::{ - service_types::TimeZoneDataType, AttributeId, ByteString, DataTypeId, DateTime, DateTimeUtc, - ExtensionObject, Guid, LocalizedText, NodeId, NumericRange, ObjectId, ObjectTypeId, - QualifiedName, TimestampsToReturn, UAString, VariableTypeId, Variant, -}; - -use crate::server::address_space::{ - object::ObjectBuilder, relative_path::*, variable::VariableBuilder, AddressSpace, + AttributeId, ByteString, DateTime, ExtensionObject, LocalizedText, NodeId, NumericRange, + ObjectId, ObjectTypeId, QualifiedName, TimeZoneDataType, UAString, Variant, }; -/// Events can implement this to populate themselves into the address space pub trait Event { - type Err; + fn get_field( + &self, + type_definition_id: &NodeId, + browse_path: &[QualifiedName], + attribute_id: AttributeId, + index_range: NumericRange, + ) -> Variant; - /// Tests if the event is valid - fn is_valid(&self) -> bool; + fn time(&self) -> &DateTime; - /// Raises the event, i.e. adds the object into the address space. The event must be valid to be inserted. 
- fn raise(&mut self, address_space: &mut AddressSpace) -> Result; + fn matches_type_id(&self, id: &NodeId) -> bool; } +#[derive(Debug, Default)] /// This corresponds to BaseEventType definition in OPC UA Part 5 pub struct BaseEventType { - /// Node id - node_id: NodeId, - /// Parent node - parent_node: NodeId, - /// Browse name - browse_name: QualifiedName, - /// Display name - display_name: LocalizedText, /// A unique identifier for an event, e.g. a GUID in a byte string - event_id: ByteString, + pub event_id: ByteString, /// Event type describes the type of event - event_type: NodeId, - /// Source node identifies the node that the event originated from - /// or null. - source_node: NodeId, + pub event_type: NodeId, + /// Source node identifies the node that the event originated from or null. + pub source_node: NodeId, /// Source name provides the description of the source of the event, /// e.g. the display of the event source - source_name: UAString, + pub source_name: UAString, /// Time provides the time the event occurred. As close /// to the event generator as possible. - time: DateTime, + pub time: DateTime, /// Receive time provides the time the OPC UA server received /// the event from the underlying device of another server. - receive_time: DateTime, + pub receive_time: DateTime, /// Local time (optional) is a structure containing /// the offset and daylightsaving flag. - local_time: Option, + pub local_time: Option, /// Message provides a human readable localizable text description /// of the event. - message: LocalizedText, + pub message: LocalizedText, /// Severity is an indication of the urgency of the event. Values from 1 to 1000, with 1 as the lowest /// severity and 1000 being the highest. A value of 1000 would indicate an event of catastrophic nature. 
/// @@ -66,702 +51,124 @@ pub struct BaseEventType { /// * 401-600 - Medium /// * 201-400 - Medium Low /// * 1-200 - Low - severity: u16, - /// Properties as string/values in the order they were added - properties: Vec<(LocalizedText, Variant)>, + pub severity: u16, + /// Condition Class Id specifies in which domain this Event is used. + pub condition_class_id: Option, + /// Condition class name specifies the name of the condition class of this event, if set. + pub condition_class_name: Option, + /// ConditionSubClassId specifies additional class[es] that apply to the Event. + /// It is the NodeId of the corresponding subtype of BaseConditionClassType. + pub condition_sub_class_id: Option>, + /// Condition sub class name specifies the names of additional classes that apply to the event. + pub condition_sub_class_name: Option>, +} + +macro_rules! take_value { + ($v:expr, $r:ident) => {{ + let variant: Variant = $v.clone().into(); + variant.range_of_owned($r).unwrap_or(Variant::Empty) + }}; } impl Event for BaseEventType { - type Err = (); + fn get_field( + &self, + type_definition_id: &NodeId, + browse_path: &[QualifiedName], + attribute_id: AttributeId, + index_range: NumericRange, + ) -> Variant { + if !self.matches_type_id(type_definition_id) + || browse_path.len() != 1 + || attribute_id != AttributeId::Value + { + // Field is not from base event type. 
+ return Variant::Empty; + } + let field = &browse_path[0]; + if field.namespace_index != 0 { + return Variant::Empty; + } - fn is_valid(&self) -> bool { - !self.node_id.is_null() - && !self.event_id.is_null_or_empty() - && !self.event_type.is_null() - && self.severity >= 1 - && self.severity <= 1000 + match field.name.as_ref() { + "EventId" => take_value!(self.event_id, index_range), + "EventType" => take_value!(self.event_type, index_range), + "SourceNode" => take_value!(self.source_node, index_range), + "SourceName" => take_value!(self.source_name, index_range), + "Time" => take_value!(self.time, index_range), + "ReceiveTime" => take_value!(self.receive_time, index_range), + "LocalTime" => take_value!( + self.local_time + .as_ref() + .map(|t| ExtensionObject::from_encodable( + ObjectId::TimeZoneDataType_Encoding_DefaultBinary, + t + )), + index_range + ), + "Message" => take_value!(self.message, index_range), + "Severity" => take_value!(self.severity, index_range), + "ConditionClassId" => take_value!(self.condition_class_id, index_range), + "ConditionClassName" => take_value!(self.condition_class_name, index_range), + "ConditionSubClassId" => take_value!(self.condition_sub_class_id, index_range), + "ConditionSubClassName" => take_value!(self.condition_sub_class_name, index_range), + _ => Variant::Empty, + } } - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - if self.is_valid() { - // create an event object in a folder with the - let ns = self.node_id.namespace; - let node_id = self.node_id.clone(); - - let object_builder = ObjectBuilder::new( - &self.node_id, - self.browse_name.clone(), - self.display_name.clone(), - ) - .organized_by(self.parent_node.clone()) - .has_type_definition(self.event_type.clone()); - - let object_builder = if !self.source_node.is_null() { - object_builder.has_event_source(self.source_node.clone()) - } else { - object_builder - }; - object_builder.insert(address_space); - - // Mandatory properties - 
self.add_property( - &node_id, - NodeId::next_numeric(ns), - "EventId", - "EventId", - DataTypeId::ByteString, - self.event_id.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "EventType", - "EventType", - DataTypeId::NodeId, - self.event_type.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "SourceNode", - "SourceNode", - DataTypeId::NodeId, - self.source_node.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "SourceName", - "SourceName", - DataTypeId::String, - self.source_name.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "Time", - "Time", - DataTypeId::UtcTime, - self.time, - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "ReceiveTime", - "ReceiveTime", - DataTypeId::UtcTime, - self.receive_time, - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "Message", - "Message", - DataTypeId::LocalizedText, - self.message.clone(), - address_space, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "Severity", - "Severity", - DataTypeId::UInt16, - self.severity, - address_space, - ); - - // LocalTime is optional - if let Some(ref local_time) = self.local_time { - // Serialise to extension object - let local_time = ExtensionObject::from_encodable( - ObjectId::TimeZoneDataType_Encoding_DefaultBinary, - local_time, - ); - self.add_property( - &node_id, - NodeId::next_numeric(ns), - "LocalTime", - "LocalTime", - DataTypeId::TimeZoneDataType, - local_time, - address_space, - ); - } + fn time(&self) -> &DateTime { + &self.time + } - Ok(node_id) - } else { - error!("Event is invalid and will not be inserted"); - Err(()) - } + fn matches_type_id(&self, id: &NodeId) -> bool { + let own_type_id: NodeId = ObjectTypeId::BaseEventType.into(); + id == &own_type_id } } impl BaseEventType { - pub fn new_now( - node_id: R, 
- event_type_id: E, - browse_name: S, - display_name: T, - parent_node: U, - ) -> Self - where - R: Into, - E: Into, - S: Into, - T: Into, - U: Into, - { - let now = DateTime::now(); - Self::new( - node_id, - event_type_id, - browse_name, - display_name, - parent_node, - now, - ) - } - - pub fn new( - node_id: R, - event_type_id: E, - browse_name: S, - display_name: T, - parent_node: U, + pub fn new_now( + type_id: impl Into, + event_id: ByteString, + message: impl Into, + ) -> Self { + let time = DateTime::now(); + Self::new(type_id, event_id, message, time) + } + + pub fn new( + type_id: impl Into, + event_id: ByteString, + message: impl Into, time: DateTime, - ) -> Self - where - R: Into, - E: Into, - S: Into, - T: Into, - U: Into, - { + ) -> Self { Self { - node_id: node_id.into(), - browse_name: browse_name.into(), - display_name: display_name.into(), - parent_node: parent_node.into(), - event_id: Guid::new().into(), - event_type: event_type_id.into(), - source_node: NodeId::null(), - source_name: UAString::null(), + event_id, + event_type: type_id.into(), + message: message.into(), time, receive_time: time, - local_time: None, - message: LocalizedText::null(), - severity: 1, - properties: Vec::with_capacity(20), + ..Default::default() } } - /// Add a property to the event object - pub fn add_property( - &mut self, - event_id: &NodeId, - property_id: T, - browse_name: R, - display_name: S, - data_type: U, - value: V, - address_space: &mut AddressSpace, - ) where - T: Into, - R: Into, - S: Into, - U: Into, - V: Into, - { - let display_name = display_name.into(); - let value = value.into(); - self.properties.push((display_name.clone(), value.clone())); - - Self::do_add_property( - event_id, - property_id, - browse_name, - display_name, - data_type, - value, - address_space, - ) - } - - /// Helper function inserts a property for the event - fn do_add_property( - event_id: &NodeId, - property_id: T, - browse_name: R, - display_name: S, - data_type: U, - value: V, 
- address_space: &mut AddressSpace, - ) where - T: Into, - R: Into, - S: Into, - U: Into, - V: Into, - { - VariableBuilder::new(&property_id.into(), browse_name, display_name) - .property_of(event_id.clone()) - .has_type_definition(VariableTypeId::PropertyType) - .data_type(data_type) - .value(value) - .insert(address_space); - } - - pub fn message(mut self, message: T) -> Self - where - T: Into, - { - self.message = message.into(); - self - } - - pub fn source_node(mut self, source_node: T) -> Self - where - T: Into, - { - self.source_node = source_node.into(); + pub fn set_source_node(mut self, source_node: NodeId) -> Self { + self.source_node = source_node; self } - pub fn source_name(mut self, source_name: T) -> Self - where - T: Into, - { - self.source_name = source_name.into(); + pub fn set_source_name(mut self, source_name: UAString) -> Self { + self.source_name = source_name; self } - pub fn local_time(mut self, local_time: Option) -> Self { - self.local_time = local_time; + pub fn set_receive_time(mut self, receive_time: DateTime) -> Self { + self.receive_time = receive_time; self } - pub fn severity(mut self, severity: u16) -> Self { + pub fn set_severity(mut self, severity: u16) -> Self { self.severity = severity; self } - - pub fn receive_time(mut self, receive_time: DateTime) -> Self { - self.receive_time = receive_time; - self - } - - pub fn properties(&self) -> &Vec<(LocalizedText, Variant)> { - &self.properties - } -} - -/// This is a macro for types that aggregate from BaseEventType and want to expose the -/// builder functions. -macro_rules! 
base_event_impl { - ( $event:ident, $base:ident ) => { - impl $event { - pub fn add_property( - &mut self, - event_id: &NodeId, - property_id: T, - browse_name: R, - display_name: S, - data_type: U, - value: V, - address_space: &mut AddressSpace, - ) where - T: Into, - R: Into, - S: Into, - U: Into, - V: Into, - { - self.$base.add_property( - event_id, - property_id, - browse_name, - display_name, - data_type, - value, - address_space, - ); - } - - pub fn message(mut self, message: T) -> $event - where - T: Into, - { - self.$base = self.$base.message(message); - self - } - - pub fn source_node(mut self, source_node: T) -> $event - where - T: Into, - { - self.$base = self.$base.source_node(source_node); - self - } - - pub fn source_name(mut self, source_name: T) -> $event - where - T: Into, - { - self.$base = self.$base.source_name(source_name); - self - } - - pub fn local_time(mut self, local_time: Option) -> $event { - self.$base = self.$base.local_time(local_time); - self - } - - pub fn severity(mut self, severity: u16) -> $event { - self.$base = self.$base.severity(severity); - self - } - - pub fn receive_time(mut self, receive_time: DateTime) -> $event { - self.$base = self.$base.receive_time(receive_time); - self - } - } - }; -} - -fn event_source_node(event_id: &NodeId, address_space: &AddressSpace) -> Option { - if let Ok(event_time_node) = - find_node_from_browse_path(address_space, event_id, &["SourceNode".into()]) - { - if let Some(value) = event_time_node.as_node().get_attribute( - TimestampsToReturn::Neither, - AttributeId::Value, - NumericRange::None, - &QualifiedName::null(), - ) { - if let Some(value) = value.value { - match value { - Variant::NodeId(node_id) => Some(*node_id), - _ => None, - } - } else { - None - } - } else { - None - } - } else { - None - } -} - -fn event_time(event_id: &NodeId, address_space: &AddressSpace) -> Option { - // Find the Time variable under the event to return a timestamp. 
- if let Ok(event_time_node) = - find_node_from_browse_path(address_space, event_id, &["Time".into()]) - { - if let Some(value) = event_time_node.as_node().get_attribute( - TimestampsToReturn::Neither, - AttributeId::Value, - NumericRange::None, - &QualifiedName::null(), - ) { - if let Some(value) = value.value { - match value { - Variant::DateTime(date_time) => Some(*date_time), - _ => None, - } - } else { - None - } - } else { - None - } - } else { - None - } -} - -/// Attempts to find events that were emitted by the source object based upon a time predicate -pub fn filter_events( - source_object_id: T, - event_type_id: R, - address_space: &AddressSpace, - time_predicate: F, -) -> Option> -where - T: Into, - R: Into, - F: Fn(&DateTimeUtc) -> bool, -{ - let event_type_id = event_type_id.into(); - let source_object_id = source_object_id.into(); - // Find events of type event_type_id - if let Some(events) = address_space.find_objects_by_type(event_type_id, true) { - let event_ids = events - .iter() - .filter(move |event_id| { - let mut filter = false; - if let Some(source_node) = event_source_node(event_id, address_space) { - // Browse the relative path for the "Time" variable - if let Some(event_time) = event_time(event_id, address_space) { - // Filter on those happened since the time - if time_predicate(&event_time.as_chrono()) { - // Whose source node is source_object_id - filter = source_node == source_object_id - } - } - } - filter - }) - .cloned() - .collect::>(); - if event_ids.is_empty() { - None - } else { - Some(event_ids) - } - } else { - None - } -} - -pub fn purge_events( - source_object_id: T, - event_type_id: R, - address_space: &mut AddressSpace, - happened_before: &DateTimeUtc, -) -> usize -where - T: Into, - R: Into, -{ - if let Some(events) = filter_events( - source_object_id, - event_type_id, - address_space, - move |event_time| event_time < happened_before, - ) { - // Delete these events from the address space - info!("Deleting some events from 
the address space"); - let len = events.len(); - events.into_iter().for_each(|node_id| { - debug!("Deleting event {}", node_id); - address_space.delete(&node_id, true); - }); - len - } else { - 0 - } -} - -/// Searches for events of the specified event type which reference the source object -pub fn events_for_object( - source_object_id: T, - address_space: &AddressSpace, - happened_since: &DateTimeUtc, -) -> Option> -where - T: Into, -{ - filter_events( - source_object_id, - ObjectTypeId::BaseEventType, - address_space, - move |event_time| event_time >= happened_since, - ) -} - -#[test] -fn test_event_source_node() { - let mut address_space = AddressSpace::new(); - let ns = address_space.register_namespace("urn:test").unwrap(); - // Raise an event - let event_id = NodeId::next_numeric(ns); - let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new( - &event_id, - event_type_id, - "Event1", - "", - NodeId::objects_folder_id(), - DateTime::now(), - ) - .source_node(ObjectId::Server_ServerCapabilities); - assert!(event.raise(&mut address_space).is_ok()); - // Check that the helper fn returns the expected source node - assert_eq!( - event_source_node(&event_id, &address_space).unwrap(), - ObjectId::Server_ServerCapabilities.into() - ); -} - -#[test] -fn test_event_time() { - let mut address_space = AddressSpace::new(); - let ns = address_space.register_namespace("urn:test").unwrap(); - // Raise an event - let event_id = NodeId::next_numeric(ns); - let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new( - &event_id, - event_type_id, - "Event1", - "", - NodeId::objects_folder_id(), - DateTime::now(), - ) - .source_node(ObjectId::Server_ServerCapabilities); - let expected_time = event.time.clone(); - assert!(event.raise(&mut address_space).is_ok()); - // Check that the helper fn returns the expected source node - assert_eq!( - event_time(&event_id, &address_space).unwrap(), - expected_time - ); -} - -#[test] -fn 
test_events_for_object() { - let mut address_space = AddressSpace::new(); - let ns = address_space.register_namespace("urn:test").unwrap(); - - // Raise an event - let happened_since = chrono::Utc::now(); - let event_id = NodeId::next_numeric(ns); - let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new( - &event_id, - event_type_id, - "Event1", - "", - NodeId::objects_folder_id(), - DateTime::now(), - ) - .source_node(ObjectId::Server_ServerCapabilities); - assert!(event.raise(&mut address_space).is_ok()); - - // Check that event can be found - let mut events = events_for_object( - ObjectId::Server_ServerCapabilities, - &address_space, - &happened_since, - ) - .unwrap(); - assert_eq!(events.len(), 1); - assert_eq!(events.pop().unwrap(), event_id); -} - -#[test] -fn test_purge_events() { - use crate::types::Identifier; - - crate::console_logging::init(); - - let mut address_space = AddressSpace::new(); - - // Nodes will be created in this namespace - let ns = address_space.register_namespace("urn:mynamespace").unwrap(); - - // This test is going to raise a bunch of events and then purge some of them. The purged - // events should be the ones expected to be purged and there should be no trace of them - // in the address space after they are removed. - - // Raising events will create bunch of numeric node ids for their properties. This - // call will find out the node id that the first node is most likely to have (note that if - // tests are run concurrently that use next_numeric() then they are not going to belong to this - // test but that does not matter. 
- let first_node_id = match NodeId::next_numeric(ns).identifier { - Identifier::Numeric(i) => i + 1, - _ => panic!(), - }; - - let source_node = ObjectId::Server_ServerCapabilities; - - // Raise a bunch of events - let start_time = DateTime::now().as_chrono(); - let mut time = start_time.clone(); - let mut last_purged_node_id = 0; - - let event_type_id = ObjectTypeId::BaseEventType; - - (0..10).for_each(|i| { - let event_id = NodeId::new(ns, format!("Event{}", i)); - let event_name = format!("Event {}", i); - let mut event = BaseEventType::new( - &event_id, - event_type_id, - event_name, - "", - NodeId::objects_folder_id(), - DateTime::from(time), - ) - .source_node(source_node); - assert!(event.raise(&mut address_space).is_ok()); - - // The first 5 events will be purged, so note the last node id here because none of the - // ids between start and end should survive when tested. - if i == 4 { - last_purged_node_id = match NodeId::next_numeric(ns).identifier { - Identifier::Numeric(i) => i, - _ => panic!(), - }; - } - - time = time + chrono::Duration::minutes(5); - }); - - // Expect all events - let events = events_for_object(source_node, &address_space, &start_time).unwrap(); - assert_eq!(events.len(), 10); - - // Purge all events up to halfway - let happened_before = start_time + chrono::Duration::minutes(25); - assert_eq!( - purge_events( - source_node, - ObjectTypeId::BaseEventType, - &mut address_space, - &happened_before - ), - 5 - ); - - // Should have only 5 events left - let events = events_for_object(source_node, &address_space, &start_time).unwrap(); - assert_eq!(events.len(), 5); - - // There should be NO reference left to any of the events we purged in the address space - let references = address_space.references(); - (0..5).for_each(|i| { - let event_id = NodeId::new(ns, format!("Event{}", i)); - assert!(!references.reference_to_node_exists(&event_id)); - }); - (5..10).for_each(|i| { - let event_id = NodeId::new(ns, format!("Event{}", i)); - 
assert!(references.reference_to_node_exists(&event_id)); - }); - - // The node that generated the events should not be purged - // This was a bug during development - let source_node: NodeId = source_node.into(); - debug!("Expecting to still find source node {}", source_node); - assert!(address_space.find_node(&source_node).is_some()); - - // All of properties that were created for purged nodes fall between first and last node id. - // None of the properties should exist now either - just scan over the range of numbers these - // nodes reside in. - (first_node_id..last_purged_node_id).for_each(|i| { - // Event properties were numerically assigned from the NS - let node_id = NodeId::new(ns, i); - assert!(address_space.find_node(&node_id).is_none()); - assert!(!references.reference_to_node_exists(&node_id)); - }); } diff --git a/lib/src/server/events/event_filter.rs b/lib/src/server/events/event_filter.rs deleted file mode 100644 index 4b41d0858..000000000 --- a/lib/src/server/events/event_filter.rs +++ /dev/null @@ -1,393 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::convert::TryFrom; - -use crate::types::{ - operand::Operand, - service_types::{ - ContentFilter, ContentFilterElementResult, ContentFilterResult, EventFieldList, - EventFilter, EventFilterResult, FilterOperator, SimpleAttributeOperand, - }, - status_code::StatusCode, - AttributeId, DateTimeUtc, NodeId, Variant, -}; - -use crate::server::{ - address_space::{address_space::AddressSpace, node::NodeType, relative_path::*}, - events::event::events_for_object, - events::operator, -}; - -/// This validates the event filter as best it can to make sure it doesn't contain nonsense. 
-pub fn validate( - event_filter: &EventFilter, - address_space: &AddressSpace, -) -> Result { - let select_clause_results = event_filter.select_clauses.as_ref().map(|select_clauses| { - select_clauses - .iter() - .map(|clause| validate_select_clause(clause, address_space)) - .collect() - }); - let where_clause_result = validate_where_clause(&event_filter.where_clause, address_space)?; - Ok(EventFilterResult { - select_clause_results, - select_clause_diagnostic_infos: None, - where_clause_result, - }) -} - -/// Evaluate the event filter and see if it triggers. -pub fn evaluate( - object_id: &NodeId, - event_filter: &EventFilter, - address_space: &AddressSpace, - happened_since: &DateTimeUtc, - client_handle: u32, -) -> Option> { - if let Some(events) = events_for_object(object_id, address_space, happened_since) { - let event_fields = events - .iter() - .filter(|event_id| { - if let Ok(result) = - evaluate_where_clause(event_id, &event_filter.where_clause, address_space) - { - result == Variant::Boolean(true) - } else { - false - } - }) - .map(|event_id| { - // Produce an event notification list from the select clauses. - let event_fields = event_filter.select_clauses.as_ref().map(|select_clauses| { - select_clauses - .iter() - .map(|v| operator::value_of_simple_attribute(event_id, v, address_space)) - .collect() - }); - EventFieldList { - client_handle, - event_fields, - } - }) - .collect::>(); - if event_fields.is_empty() { - None - } else { - Some(event_fields) - } - } else { - None - } -} - -/// Evaluates a where clause which is a tree of conditionals -pub(crate) fn evaluate_where_clause( - object_id: &NodeId, - where_clause: &ContentFilter, - address_space: &AddressSpace, -) -> Result { - // Clause is meant to have been validated before now so this code is not as stringent and makes some expectations. 
- if let Some(ref elements) = where_clause.elements { - if !elements.is_empty() { - use std::collections::HashSet; - let mut used_elements = HashSet::new(); - used_elements.insert(0); - let result = operator::evaluate( - object_id, - &elements[0], - &mut used_elements, - elements, - address_space, - )?; - Ok(result) - } else { - Ok(true.into()) - } - } else { - Ok(true.into()) - } -} - -fn validate_select_clause( - clause: &SimpleAttributeOperand, - address_space: &AddressSpace, -) -> StatusCode { - // The SimpleAttributeOperand structure is used in the selectClauses to select the value to return - // if an Event meets the criteria specified by the whereClause. A null value is returned in the corresponding - // event field in the publish response if the selected field is not part of the event or an - // error was returned in the selectClauseResults of the EventFilterResult. - - if !clause.index_range.is_empty() { - // TODO support index ranges - error!("Select clause specifies an index range and will be rejected"); - StatusCode::BadIndexRangeInvalid - } else if let Some(ref browse_path) = clause.browse_path { - // Validate that the browse paths seem okay relative to the object type definition in the clause - if let Ok(node) = - find_node_from_browse_path(address_space, &clause.type_definition_id, browse_path) - { - // Validate the attribute id. Per spec: - // - // The SimpleAttributeOperand allows the client to specify any attribute; however the server - // is only required to support the value attribute for variable nodes and the NodeId attribute - // for object nodes. That said, profiles defined in Part 7 may make support for - // additional attributes mandatory. - // - // So code will implement the bare minimum for now. 
- let valid_attribute_id = match node { - NodeType::Object(_) => { - // Only the node id - clause.attribute_id == AttributeId::NodeId as u32 - } - NodeType::Variable(_) => { - // Only the value - clause.attribute_id == AttributeId::Value as u32 - } - _ => { - // find_node_from_browse_path shouldn't have returned anything except an object - // or variable node. - panic!() - } - }; - if !valid_attribute_id { - StatusCode::BadAttributeIdInvalid - } else { - StatusCode::Good - } - } else { - error!("Invalid select clause node not found {:?}", clause); - StatusCode::BadNodeIdUnknown - } - } else { - error!("Invalid select clause with no browse path supplied"); - StatusCode::BadNodeIdUnknown - } -} - -fn validate_where_clause( - where_clause: &ContentFilter, - address_space: &AddressSpace, -) -> Result { - // The ContentFilter structure defines a collection of elements that define filtering criteria. - // Each element in the collection describes an operator and an array of operands to be used by - // the operator. The operators that can be used in a ContentFilter are described in Table 119. - // The filter is evaluated by evaluating the first entry in the element array starting with the - // first operand in the operand array. The operands of an element may contain References to - // sub-elements resulting in the evaluation continuing to the referenced elements in the element - // array. The evaluation shall not introduce loops. For example evaluation starting from element - // “A” shall never be able to return to element “A”. However there may be more than one path - // leading to another element “B”. If an element cannot be traced back to the starting element - // it is ignored. Extra operands for any operator shall result in an error. Annex B provides - // examples using the ContentFilter structure. 
- - if let Some(ref elements) = where_clause.elements { - let element_results = elements.iter().map(|e| { - let (status_code, operand_status_codes) = if e.filter_operands.is_none() { - // All operators need at least one operand - (StatusCode::BadFilterOperandCountMismatch, None) - } else { - let filter_operands = e.filter_operands.as_ref().unwrap(); - - // The right number of operators? The spec implies it is okay to pass - // more operands than the required #, but less is an error. - let operand_count_mismatch = match e.filter_operator { - FilterOperator::Equals => filter_operands.len() < 2, - FilterOperator::IsNull => filter_operands.len() < 1, - FilterOperator::GreaterThan => filter_operands.len() < 2, - FilterOperator::LessThan => filter_operands.len() < 2, - FilterOperator::GreaterThanOrEqual => filter_operands.len() < 2, - FilterOperator::LessThanOrEqual => filter_operands.len() < 2, - FilterOperator::Like => filter_operands.len() < 2, - FilterOperator::Not => filter_operands.len() < 1, - FilterOperator::Between => filter_operands.len() < 3, - FilterOperator::InList => filter_operands.len() < 2, // 2..n - FilterOperator::And => filter_operands.len() < 2, - FilterOperator::Or => filter_operands.len() < 2, - FilterOperator::Cast => filter_operands.len() < 2, - FilterOperator::BitwiseAnd => filter_operands.len() < 2, - FilterOperator::BitwiseOr => filter_operands.len() < 2, - _ => true, - }; - - // Check if the operands look okay - let operand_status_codes = filter_operands.iter().map(|e| { - // Look to see if any operand cannot be parsed - match ::try_from(e) { - Ok(operand) => { - match operand { - Operand::AttributeOperand(_) => { - // AttributeOperand may not be used in an EventFilter where clause - error!("AttributeOperand is not permitted in EventFilter where clause"); - StatusCode::BadFilterOperandInvalid - } - Operand::ElementOperand(ref o) => { - // Check that operands have to have an index <= number of elements - if o.index as usize >= elements.len() { 
- error!("Invalid element operand is out of range"); - StatusCode::BadFilterOperandInvalid - } else { - StatusCode::Good - } - // TODO operand should not refer to itself either directly or through circular - // references - } - Operand::SimpleAttributeOperand(ref o) => { - // The structure requires the node id of an event type supported - // by the server and a path to an InstanceDeclaration. An InstanceDeclaration - // is a Node which can be found by following forward hierarchical references from the fully - // inherited EventType where the Node is also the source of a HasModellingRuleReference. EventTypes - // InstanceDeclarations and Modelling rules are described completely in Part 3. - - // In some case the same BrowsePath will apply to multiple EventTypes. If - // the Client specifies the BaseEventType in the SimpleAttributeOperand - // then the Server shall evaluate the BrowsePath without considering the Type. - - // Each InstanceDeclaration in the path shall be Object or Variable Node. 
- - // Check the element exists in the address space - if let Some(ref browse_path) = o.browse_path { - if let Ok(_node) = find_node_from_browse_path(address_space, &o.type_definition_id, browse_path) { - StatusCode::Good - } else { - StatusCode::BadFilterOperandInvalid - } - } else { - StatusCode::BadFilterOperandInvalid - } - } - _ => StatusCode::Good - } - } - Err(err) => { - error!("Operand cannot be read from extension object, err = {}", err); - StatusCode::BadFilterOperandInvalid - } - } - }).collect::>(); - - // Check if any operands were invalid - let operator_invalid = operand_status_codes.iter().any(|e| !e.is_good()); - - // Check what error status to return - let status_code = if operand_count_mismatch { - error!("Where clause has invalid filter operand count"); - StatusCode::BadFilterOperandCountMismatch - } else if operator_invalid { - error!("Where clause has invalid filter operator"); - StatusCode::BadFilterOperatorInvalid - } else { - StatusCode::Good - }; - - (status_code, Some(operand_status_codes)) - }; - ContentFilterElementResult { - status_code, - operand_status_codes, - operand_diagnostic_infos: None, - } - }).collect::>(); - - Ok(ContentFilterResult { - element_results: Some(element_results), - element_diagnostic_infos: None, - }) - } else { - Ok(ContentFilterResult { - element_results: None, - element_diagnostic_infos: None, - }) - } -} - -#[test] -fn validate_where_clause_test() { - use crate::types::service_types::ContentFilterElement; - - let address_space = AddressSpace::new(); - - { - let where_clause = ContentFilter { elements: None }; - // check for at least one filter operand - let result = validate_where_clause(&where_clause, &address_space); - assert_eq!( - result.unwrap(), - ContentFilterResult { - element_results: None, - element_diagnostic_infos: None, - } - ); - } - - // Make a where clause where every single operator is included but each has the wrong number of operands. 
- // We should expect them all to be in error - { - let where_clause = ContentFilter { - elements: Some(vec![ - ContentFilterElement::from((FilterOperator::Equals, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::IsNull, vec![])), - ContentFilterElement::from(( - FilterOperator::GreaterThan, - vec![Operand::literal(10)], - )), - ContentFilterElement::from((FilterOperator::LessThan, vec![Operand::literal(10)])), - ContentFilterElement::from(( - FilterOperator::GreaterThanOrEqual, - vec![Operand::literal(10)], - )), - ContentFilterElement::from(( - FilterOperator::LessThanOrEqual, - vec![Operand::literal(10)], - )), - ContentFilterElement::from((FilterOperator::Like, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::Not, vec![])), - ContentFilterElement::from(( - FilterOperator::Between, - vec![Operand::literal(10), Operand::literal(20)], - )), - ContentFilterElement::from((FilterOperator::InList, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::And, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::Or, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::Cast, vec![Operand::literal(10)])), - ContentFilterElement::from(( - FilterOperator::BitwiseAnd, - vec![Operand::literal(10)], - )), - ContentFilterElement::from((FilterOperator::BitwiseOr, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::Like, vec![Operand::literal(10)])), - ]), - }; - // Check for less than required number of operands - let result = validate_where_clause(&where_clause, &address_space).unwrap(); - result - .element_results - .unwrap() - .iter() - .for_each(|e| assert_eq!(e.status_code, StatusCode::BadFilterOperandCountMismatch)); - } - - // check for filter operator invalid, by giving it a bogus extension object for an element - { - use crate::types::{service_types::ContentFilterElement, ExtensionObject}; - let bad_operator = 
ExtensionObject::null(); - let where_clause = ContentFilter { - elements: Some(vec![ContentFilterElement { - filter_operator: FilterOperator::IsNull, - filter_operands: Some(vec![bad_operator]), - }]), - }; - let result = validate_where_clause(&where_clause, &address_space).unwrap(); - let element_results = result.element_results.unwrap(); - assert_eq!(element_results.len(), 1); - assert_eq!( - element_results[0].status_code, - StatusCode::BadFilterOperatorInvalid - ); - } - - // TODO check operands are compatible with operator - // TODO check for ElementOperands which are cyclical or out of range -} diff --git a/lib/src/server/events/mod.rs b/lib/src/server/events/mod.rs index e35bfad99..d0c58bab6 100644 --- a/lib/src/server/events/mod.rs +++ b/lib/src/server/events/mod.rs @@ -1,10 +1,10 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -pub mod event_filter; -pub(crate) mod operator; -#[macro_use] -pub mod event; #[macro_use] -pub mod audit; +mod event; +mod evaluate; +mod validation; + +pub use event::{BaseEventType, Event}; +pub use validation::{ + ParsedAttributeOperand, ParsedContentFilter, ParsedContentFilterElement, ParsedEventFilter, + ParsedOperand, ParsedSimpleAttributeOperand, +}; diff --git a/lib/src/server/events/operator.rs b/lib/src/server/events/operator.rs deleted file mode 100644 index f9b4cd2c4..000000000 --- a/lib/src/server/events/operator.rs +++ /dev/null @@ -1,1012 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! 
Operator implementations for event filters -use std::collections::HashSet; -use std::convert::TryFrom; - -use regex::Regex; - -use crate::types::{ - operand::Operand, - service_types::{ContentFilterElement, FilterOperator, SimpleAttributeOperand}, - status_code::StatusCode, - AttributeId, ExtensionObject, NodeId, NumericRange, QualifiedName, TimestampsToReturn, Variant, - VariantTypeId, -}; - -use crate::server::address_space::{ - node::{NodeBase, NodeType}, - relative_path::find_node_from_browse_path, - AddressSpace, -}; - -/// Turns a list of operands inside extension objects to their analogous Operand objects -fn make_filter_operands(filter_operands: &[ExtensionObject]) -> Result, StatusCode> { - // If any operand cannot be converted then the whole action is in error - let operands = filter_operands - .iter() - .map(Operand::try_from) - .take_while(|v| v.is_ok()) - .map(|v| v.unwrap()) - .collect::>(); - - // Every operand must have been converted - if operands.len() == filter_operands.len() { - Ok(operands) - } else { - error!("One or more operands could not be parsed"); - Err(StatusCode::BadFilterOperandInvalid) - } -} - -/// Evaluates the expression -pub(crate) fn evaluate( - object_id: &NodeId, - element: &ContentFilterElement, - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - if let Some(ref filter_operands) = element.filter_operands { - if !filter_operands.is_empty() { - // Turn ExtensionObjects into Operands here. This should be externalised even further so it - // doesn't have to be done on each evaluation, e.g. turn ContentFilterElement into a ServerContentFilterElement - // which has the operands . 
- let operands = make_filter_operands(filter_operands)?; - match element.filter_operator { - FilterOperator::Equals => eq( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::IsNull => is_null( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::GreaterThan => gt( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::LessThan => lt( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::GreaterThanOrEqual => gte( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::LessThanOrEqual => lte( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::Like => like( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::Not => not( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::Between => between( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::InList => in_list( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::And => and( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::Or => or( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::Cast => cast( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::BitwiseAnd => bitwise_and( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - FilterOperator::BitwiseOr => bitwise_or( - object_id, - &operands[..], - used_elements, - elements, - address_space, - ), - _ => Err(StatusCode::BadFilterOperatorUnsupported), - } - } else { - // All operators need at least one operand - warn!("evaluate() called with no 
operands (zero len)"); - Err(StatusCode::BadFilterOperandCountMismatch) - } - } else { - // All operators need at least one operand - warn!("evaluate() called with no operands (None)"); - Err(StatusCode::BadFilterOperandCountMismatch) - } -} - -/// Get the value of something and convert to the expected type. -fn value_as( - object_id: &NodeId, - as_type: VariantTypeId, - operand: &Operand, - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let v = value_of(object_id, operand, used_elements, elements, address_space)?; - Ok(v.convert(as_type)) -} - -pub(crate) fn value_of_simple_attribute( - object_id: &NodeId, - o: &SimpleAttributeOperand, - address_space: &AddressSpace, -) -> Variant { - // Get the Object / Variable by browse path - if let Some(ref browse_path) = o.browse_path { - // TODO o.data_type is ignored but be used to restrict the browse - // path to subtypes of HierarchicalReferences - - // Find the actual node via browse path - if let Ok(node) = find_node_from_browse_path(address_space, object_id, browse_path) { - match node { - NodeType::Object(ref node) => { - if o.attribute_id == AttributeId::NodeId as u32 { - node.node_id().into() - } else { - error!( - "value_of, unsupported attribute id {} on object", - o.attribute_id - ); - Variant::Empty - } - } - NodeType::Variable(ref node) => { - if o.attribute_id == AttributeId::Value as u32 { - if let Some(ref value) = node - .value( - TimestampsToReturn::Neither, - NumericRange::None, - &QualifiedName::null(), - 0.0, - ) - .value - { - value.clone() - } else { - Variant::Empty - } - } else { - error!( - "value_of, unsupported attribute id {} on Variable", - o.attribute_id - ); - Variant::Empty - } - } - _ => Variant::Empty, - } - } else { - error!( - "value_of, cannot find node from browse path {:?}", - browse_path - ); - Variant::Empty - } - } else { - error!("value_of, invalid browse path supplied to operand"); - Variant::Empty - } -} - -// 
This function fetches the value of the operand. -pub(crate) fn value_of( - object_id: &NodeId, - operand: &Operand, - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - match operand { - Operand::ElementOperand(ref o) => { - if used_elements.contains(&o.index) { - error!("Operator contains elements that have already been used cyclical and is invalid"); - Err(StatusCode::BadFilterOperandInvalid) - } else { - used_elements.insert(o.index); - let result = evaluate( - object_id, - &elements[o.index as usize], - used_elements, - elements, - address_space, - ); - used_elements.remove(&o.index); - result - } - } - Operand::LiteralOperand(ref o) => Ok(o.value.clone()), - Operand::SimpleAttributeOperand(ref o) => { - Ok(value_of_simple_attribute(object_id, o, address_space)) - } - Operand::AttributeOperand(_) => { - panic!(); - } - } -} - -fn convert(v1: Variant, v2: Variant) -> (Variant, Variant) { - // Types may have to be converted to be compared - let dt1 = v1.type_id(); - let dt2 = v2.type_id(); - if dt1 != dt2 { - if dt1.precedence() < dt2.precedence() { - (v1, v2.convert(dt1)) - } else { - (v1.convert(dt2), v2) - } - } else { - (v1, v2) - } -} - -// Tests if the operand is null (empty). TRUE if operand[0] is a null value. -pub(crate) fn is_null( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let v1 = value_of( - object_id, - &operands[0], - used_elements, - elements, - address_space, - )?; - Ok((Variant::Empty == v1).into()) -} - -#[derive(PartialEq)] -enum ComparisonResult { - // Value 1 is less than value 2 - LessThan, - // Value 1 is equal to value 2 - Equals, - // Value 1 is greater than value 2 - GreaterThan, - // Not equals, for boolean comparisons - NotEquals, - // Error - Error, -} - -macro_rules! 
compare_values { - ( $v1: expr, $v2: expr, $variant_type: ident ) => {{ - if let Variant::$variant_type(v1) = $v1 { - if let Variant::$variant_type(v2) = $v2 { - if v1 < v2 { - ComparisonResult::LessThan - } else if v1 == v2 { - ComparisonResult::Equals - } else { - ComparisonResult::GreaterThan - } - } else { - panic!(); - } - } else { - panic!(); - } - }}; -} - -/// Compares to operands by taking their numeric value, comparing the value and saying -/// which of the two is less than, greater than or equal. If the values cannot be compared, the -/// result is an error. -fn compare_operands( - object_id: &NodeId, - o1: &Operand, - o2: &Operand, - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let v1 = value_of(object_id, o1, used_elements, elements, address_space)?; - let v2 = value_of(object_id, o2, used_elements, elements, address_space)?; - // Try and convert one value or the other to the same type - let (v1, v2) = convert(v1, v2); - let result = match v1.type_id() { - VariantTypeId::SByte => compare_values!(v1, v2, SByte), - VariantTypeId::Byte => compare_values!(v1, v2, Byte), - VariantTypeId::Int16 => compare_values!(v1, v2, Int16), - VariantTypeId::Int32 => compare_values!(v1, v2, Int32), - VariantTypeId::Int64 => compare_values!(v1, v2, Int64), - VariantTypeId::UInt16 => compare_values!(v1, v2, UInt16), - VariantTypeId::UInt32 => compare_values!(v1, v2, UInt32), - VariantTypeId::UInt64 => compare_values!(v1, v2, UInt64), - VariantTypeId::Double => compare_values!(v1, v2, Double), - VariantTypeId::Float => compare_values!(v1, v2, Float), - VariantTypeId::Boolean => { - if v1 == v2 { - ComparisonResult::Equals - } else { - ComparisonResult::NotEquals - } - } - _ => ComparisonResult::Error, - }; - Ok(result) -} - -// Check if the two values are equal to each other. If the operands are of different types, -// the system shall perform any implicit conversion to a common type. 
This operator resolves to -// FALSE if no implicit conversion is available and the operands are of different types. This -// operator returns FALSE if the implicit conversion fails. -pub(crate) fn eq( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let result = compare_operands( - object_id, - &operands[0], - &operands[1], - used_elements, - elements, - address_space, - )?; - Ok((result == ComparisonResult::Equals).into()) -} - -// Check if operand[0] is greater than operand[1] -pub(crate) fn gt( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let result = compare_operands( - object_id, - &operands[0], - &operands[1], - used_elements, - elements, - address_space, - )?; - Ok((result == ComparisonResult::GreaterThan).into()) -} - -// Check if operand[0] is less than operand[1] -pub(crate) fn lt( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let result = compare_operands( - object_id, - &operands[0], - &operands[1], - used_elements, - elements, - address_space, - )?; - Ok((result == ComparisonResult::LessThan).into()) -} - -// Check if operand[0] is greater than or equal to operand[1] -pub(crate) fn gte( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let result = compare_operands( - object_id, - &operands[0], - &operands[1], - used_elements, - elements, - address_space, - )?; - Ok((result == ComparisonResult::GreaterThan || result == ComparisonResult::Equals).into()) -} - -// Check if operand[0] is less than or equal to operand[1] -pub(crate) fn lte( - object_id: &NodeId, - operands: &[Operand], - used_elements: 
&mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let result = compare_operands( - object_id, - &operands[0], - &operands[1], - used_elements, - elements, - address_space, - )?; - Ok((result == ComparisonResult::LessThan || result == ComparisonResult::Equals).into()) -} - -/// Converts the OPC UA SQL-esque Like format into a regular expression. -fn like_to_regex(v: &str) -> Result { - // Give a reasonable buffer - let mut pattern = String::with_capacity(v.len() * 2); - - let mut in_list = false; - - // Turn the chars into a vec to make it easier to index them - let v = v.chars().collect::>(); - - pattern.push('^'); - v.iter().enumerate().for_each(|(i, c)| { - if in_list { - if *c == ']' && (i == 0 || v[i - 1] != '\\') { - // Close the list - in_list = false; - pattern.push(*c); - } else { - // Chars in list are escaped if required - match c { - '$' | '(' | ')' | '.' | '+' | '*' | '?' => { - // Other regex chars except for ^ are escaped - pattern.push('\\'); - pattern.push(*c); - } - _ => { - // Everything between two [] will be treated as-is - pattern.push(*c); - } - } - } - } else { - match c { - '$' | '^' | '(' | ')' | '.' | '+' | '*' | '?' 
=> { - // Other regex chars are escaped - pattern.push('\\'); - pattern.push(*c); - } - '[' => { - // Opens a list of chars to match - if i == 0 || v[i - 1] != '\\' { - // Open the list - in_list = true; - } - pattern.push(*c); - } - '%' => { - if i == 0 || v[i - 1] != '\\' { - // A % is a match on zero or more chans unless it is escaped - pattern.push_str(".*"); - } else { - pattern.push(*c); - } - } - '_' => { - if i == 0 || v[i - 1] != '\\' { - // A _ is a match on a single char unless it is escaped - pattern.push('?'); - } else { - // Remove escaping of the underscore - let _ = pattern.pop(); - pattern.push(*c); - } - } - _ => { - pattern.push(*c); - } - } - } - }); - pattern.push('$'); - Regex::new(&pattern).map_err(|err| { - error!("Problem parsing, error = {}", err); - }) -} - -#[cfg(test)] -fn compare_regex(r1: Regex, r2: Regex) { - assert_eq!(r1.as_str(), r2.as_str()); -} - -#[test] -fn like_to_regex_tests() { - compare_regex(like_to_regex("").unwrap(), Regex::new("^$").unwrap()); - compare_regex(like_to_regex("^$").unwrap(), Regex::new(r"^\^\$$").unwrap()); - compare_regex(like_to_regex("%").unwrap(), Regex::new("^.*$").unwrap()); - compare_regex(like_to_regex("[%]").unwrap(), Regex::new("^[%]$").unwrap()); - compare_regex(like_to_regex("[_]").unwrap(), Regex::new("^[_]$").unwrap()); - compare_regex( - like_to_regex(r"[\]]").unwrap(), - Regex::new(r"^[\]]$").unwrap(), - ); - compare_regex( - like_to_regex("[$().+*?]").unwrap(), - Regex::new(r"^[\$\(\)\.\+\*\?]$").unwrap(), - ); - compare_regex(like_to_regex("_").unwrap(), Regex::new("^?$").unwrap()); - compare_regex( - like_to_regex("[a-z]").unwrap(), - Regex::new("^[a-z]$").unwrap(), - ); - compare_regex( - like_to_regex("[abc]").unwrap(), - Regex::new("^[abc]$").unwrap(), - ); - compare_regex( - like_to_regex(r"\[\]").unwrap(), - Regex::new(r"^\[\]$").unwrap(), - ); - compare_regex( - like_to_regex("[^0-9]").unwrap(), - Regex::new("^[^0-9]$").unwrap(), - ); - - // Some samples from OPC UA part 4 - let 
re = like_to_regex("Th[ia][ts]%").unwrap(); - assert!(re.is_match("That is fine")); - assert!(re.is_match("This is fine")); - assert!(re.is_match("That as one")); - assert!(!re.is_match("Then at any")); // Spec says this should pass when it obviously wouldn't - - let re = like_to_regex("%en%").unwrap(); - assert!(re.is_match("entail")); - assert!(re.is_match("green")); - assert!(re.is_match("content")); - - let re = like_to_regex("abc[13-68]").unwrap(); - assert!(re.is_match("abc1")); - assert!(!re.is_match("abc2")); - assert!(re.is_match("abc3")); - assert!(re.is_match("abc4")); - assert!(re.is_match("abc5")); - assert!(re.is_match("abc6")); - assert!(!re.is_match("abc7")); - assert!(re.is_match("abc8")); - - let re = like_to_regex("ABC[^13-5]").unwrap(); - assert!(!re.is_match("ABC1")); - assert!(re.is_match("ABC2")); - assert!(!re.is_match("ABC3")); - assert!(!re.is_match("ABC4")); - assert!(!re.is_match("ABC5")); -} - -// Check if operand[0] is matches the pattern defined by operand[1]. -pub(crate) fn like( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // If 0 matches a pattern in 1. See table 117 - // - // 0 and 1 are operands that resolve to a string - // - // Returns FALSE if no operand can be resolved to a string - // - // % Match zero or more chars - // _ Match any single character - // \ Escape character - // [] Match any single character in a list - // [^] Not matching any single character in a list - - let v1 = value_as( - object_id, - VariantTypeId::String, - &operands[0], - used_elements, - elements, - address_space, - )?; - let v2 = value_as( - object_id, - VariantTypeId::String, - &operands[1], - used_elements, - elements, - address_space, - )?; - - let result = if let Variant::String(v1) = v1 { - if let Variant::String(v2) = v2 { - // Turn the pattern into a regex. 
Certain chars will be replaced with their regex equivalents, others will be escaped. - if let Ok(re) = like_to_regex(v2.as_ref()) { - re.is_match(v1.as_ref()) - } else { - false - } - } else { - false - } - } else { - false - }; - Ok(result.into()) -} - -// TRUE if operand[0] is FALSE. -pub(crate) fn not( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // operand[0] resolves to a boolean - // TRUE if 0 is FALSE - // If resolve fails, result is NULL - let v = value_as( - object_id, - VariantTypeId::Boolean, - &operands[0], - used_elements, - elements, - address_space, - )?; - let result = if let Variant::Boolean(v) = v { - (!v).into() - } else { - Variant::Empty - }; - Ok(result) -} - -// TRUE if operand[0] is greater or equal to operand[1] and less than or equal to operand[2]. -pub(crate) fn between( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // 0, 1, 2 are ordered values - // Element 0 must be greater or equal than element 1 - let result = match compare_operands( - object_id, - &operands[0], - &operands[1], - used_elements, - elements, - address_space, - )? { - ComparisonResult::GreaterThan | ComparisonResult::Equals => { - // Element must be less than or equal to element 2 - match compare_operands( - object_id, - &operands[0], - &operands[2], - used_elements, - elements, - address_space, - )? 
{ - ComparisonResult::LessThan | ComparisonResult::Equals => true, - _ => false, - } - } - _ => false, - }; - Ok(result.into()) -} - -// TRUE if operand[0] is equal to one or more of the remaining operands -pub(crate) fn in_list( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // TRUE if operand[0] is equal to one or more of the remaining operands. - // The Equals Operator is evaluated for operand[0] and each remaining operand in the list. - // If any Equals evaluation is TRUE, InList returns TRUE. - let found = operands[1..].iter().any(|o| { - if let Ok(result) = compare_operands( - object_id, - &operands[0], - o, - used_elements, - elements, - address_space, - ) { - result == ComparisonResult::Equals - } else { - false - } - }); - Ok(found.into()) -} - -// TRUE if operand[0] and operand[1] are TRUE. -pub(crate) fn and( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // The following restrictions apply to the operands: - // [0]: Any operand that resolves to a Boolean. - // [1]: Any operand that resolves to a Boolean. - // If any operand cannot be resolved to a Boolean it is considered a NULL. - let v1 = value_as( - object_id, - VariantTypeId::Boolean, - &operands[0], - used_elements, - elements, - address_space, - )?; - let v2 = value_as( - object_id, - VariantTypeId::Boolean, - &operands[1], - used_elements, - elements, - address_space, - )?; - - // Derived from Table 120 Logical AND Truth Table - let result = if v1 == Variant::Boolean(true) && v2 == Variant::Boolean(true) { - true.into() - } else if v1 == Variant::Boolean(false) || v2 == Variant::Boolean(false) { - false.into() - } else { - Variant::Empty - }; - Ok(result) -} - -// TRUE if operand[0] or operand[1] are TRUE. 
-pub(crate) fn or( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // The following restrictions apply to the operands: - // [0]: Any operand that resolves to a Boolean. - // [1]: Any operand that resolves to a Boolean. - // If any operand cannot be resolved to a Boolean it is considered a NULL. - let v1 = value_as( - object_id, - VariantTypeId::Boolean, - &operands[0], - used_elements, - elements, - address_space, - )?; - let v2 = value_as( - object_id, - VariantTypeId::Boolean, - &operands[1], - used_elements, - elements, - address_space, - )?; - - // Derived from Table 121 Logical OR Truth Table. - let result = if v1 == Variant::Boolean(true) || v2 == Variant::Boolean(true) { - true.into() - } else if v1 == Variant::Boolean(false) && v2 == Variant::Boolean(false) { - false.into() - } else { - // One or both values are NULL - Variant::Empty - }; - Ok(result) -} - -// Converts operand[0] to a value with a data type with a NodeId identified by operand[1]. -pub(crate) fn cast( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - // Explicitly casts operand 0 to a value with the data type with a node if identified in node 1 - // [0] Any operand - // [1] Any operand that resolves to a NodeId or ExpandedNodeId where the node is of type DataType - // - // In case of error evaluates to NULL. 
- - let v1 = value_of( - object_id, - &operands[0], - used_elements, - elements, - address_space, - )?; - let v2 = value_of( - object_id, - &operands[1], - used_elements, - elements, - address_space, - )?; - - // Cast v1 using the datatype in v2 - let result = match v2 { - Variant::NodeId(node_id) => { - if let Ok(type_id) = VariantTypeId::try_from(&(*node_id)) { - v1.cast(type_id) - } else { - Variant::Empty - } - } - Variant::ExpandedNodeId(node_id) => { - if let Ok(type_id) = VariantTypeId::try_from(&node_id.node_id) { - v1.cast(type_id) - } else { - Variant::Empty - } - } - _ => Variant::Empty, - }; - Ok(result) -} - -#[derive(PartialEq)] -enum BitOperation { - And, - Or, -} - -macro_rules! bitwise_operation { - ( $v1: expr, $v2: expr, $op: expr, $variant_type: ident ) => {{ - if let Variant::$variant_type(v1) = $v1 { - if let Variant::$variant_type(v2) = $v2 { - match $op { - BitOperation::And => (v1 & v2).into(), - BitOperation::Or => (v1 | v2).into(), - } - } else { - panic!(); - } - } else { - panic!(); - } - }}; -} - -fn bitwise_operation( - object_id: &NodeId, - operation: BitOperation, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - let v1 = value_of( - object_id, - &operands[0], - used_elements, - elements, - address_space, - )?; - let v2 = value_of( - object_id, - &operands[1], - used_elements, - elements, - address_space, - )?; - // Try and convert one value or the other to the same type - let (v1, v2) = convert(v1, v2); - let result = match v1.type_id() { - VariantTypeId::SByte => bitwise_operation!(v1, v2, operation, SByte), - VariantTypeId::Byte => bitwise_operation!(v1, v2, operation, Byte), - VariantTypeId::Int16 => bitwise_operation!(v1, v2, operation, Int16), - VariantTypeId::Int32 => bitwise_operation!(v1, v2, operation, Int32), - VariantTypeId::Int64 => bitwise_operation!(v1, v2, operation, Int64), - VariantTypeId::UInt16 => bitwise_operation!(v1, v2, 
operation, UInt16), - VariantTypeId::UInt32 => bitwise_operation!(v1, v2, operation, UInt32), - VariantTypeId::UInt64 => bitwise_operation!(v1, v2, operation, UInt64), - _ => Variant::Empty, - }; - Ok(result) -} - -// The result is an integer which matches the size of the largest operand and contains a bitwise -// And operation of the two operands where both have been converted to the same size (largest of -// the two operands). -pub(crate) fn bitwise_and( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - bitwise_operation( - object_id, - BitOperation::And, - operands, - used_elements, - elements, - address_space, - ) -} - -// The result is an integer which matches the size of the largest operand and contains a bitwise Or -// operation of the two operands where both have been converted to the same size (largest of the -// two operands). -pub(crate) fn bitwise_or( - object_id: &NodeId, - operands: &[Operand], - used_elements: &mut HashSet, - elements: &[ContentFilterElement], - address_space: &AddressSpace, -) -> Result { - bitwise_operation( - object_id, - BitOperation::Or, - operands, - used_elements, - elements, - address_space, - ) -} diff --git a/lib/src/server/events/validation.rs b/lib/src/server/events/validation.rs new file mode 100644 index 000000000..2f59796a5 --- /dev/null +++ b/lib/src/server/events/validation.rs @@ -0,0 +1,616 @@ +use std::collections::HashSet; + +use hashbrown::HashMap; + +use crate::{ + server::node_manager::TypeTree, + types::{ + AttributeId, ContentFilter, ContentFilterElementResult, ContentFilterResult, + ElementOperand, EventFilter, EventFilterResult, FilterOperator, LiteralOperand, NodeClass, + NodeId, NumericRange, ObjectTypeId, Operand, QualifiedName, RelativePath, + SimpleAttributeOperand, StatusCode, UAString, + }, +}; + +#[derive(Debug, Clone)] +pub struct ParsedAttributeOperand { + pub node_id: NodeId, + pub 
alias: UAString, + pub browse_path: RelativePath, + pub attribute_id: AttributeId, + pub index_range: NumericRange, +} + +#[derive(Debug, Clone)] +pub struct ParsedSimpleAttributeOperand { + pub type_definition_id: NodeId, + pub browse_path: Vec, + pub attribute_id: AttributeId, + pub index_range: NumericRange, +} + +#[derive(Debug, Clone)] +pub enum ParsedOperand { + ElementOperand(ElementOperand), + LiteralOperand(LiteralOperand), + AttributeOperand(ParsedAttributeOperand), + SimpleAttributeOperand(ParsedSimpleAttributeOperand), +} + +impl ParsedOperand { + pub(crate) fn parse( + operand: Operand, + num_elements: usize, + type_tree: &TypeTree, + allow_attribute_operand: bool, + ) -> Result { + match operand { + Operand::ElementOperand(e) => { + if e.index as usize >= num_elements { + Err(StatusCode::BadFilterOperandInvalid) + } else { + Ok(Self::ElementOperand(e)) + } + } + Operand::LiteralOperand(o) => Ok(Self::LiteralOperand(o)), + Operand::AttributeOperand(o) => { + if !allow_attribute_operand { + return Err(StatusCode::BadFilterOperandInvalid); + } + let attribute_id = AttributeId::from_u32(o.attribute_id) + .map_err(|_| StatusCode::BadAttributeIdInvalid)?; + let index_range = o + .index_range + .as_ref() + .parse::() + .map_err(|_| StatusCode::BadIndexRangeInvalid)?; + Ok(Self::AttributeOperand(ParsedAttributeOperand { + node_id: o.node_id, + attribute_id, + alias: o.alias, + browse_path: o.browse_path, + index_range, + })) + } + Operand::SimpleAttributeOperand(o) => Ok(Self::SimpleAttributeOperand( + validate_select_clause(o, type_tree)?, + )), + } + } +} + +#[derive(Debug, Clone)] +pub struct ParsedEventFilter { + pub(super) content_filter: ParsedContentFilter, + pub(super) select_clauses: Vec, +} + +impl ParsedEventFilter { + pub fn new( + raw: EventFilter, + type_tree: &TypeTree, + ) -> (EventFilterResult, Result) { + validate(raw, type_tree) + } +} + +#[derive(Debug, Clone)] +pub struct ParsedContentFilter { + pub(super) elements: Vec, +} + +impl 
ParsedContentFilter { + pub fn empty() -> Self { + Self { + elements: Vec::new(), + } + } + + pub(crate) fn parse( + filter: ContentFilter, + type_tree: &TypeTree, + allow_attribute_operand: bool, + allow_complex_operators: bool, + ) -> (ContentFilterResult, Result) { + validate_where_clause( + filter, + type_tree, + allow_attribute_operand, + allow_complex_operators, + ) + } +} + +#[derive(Debug, Clone)] +pub struct ParsedContentFilterElement { + pub(super) operator: FilterOperator, + pub(super) operands: Vec, +} + +/// This validates the event filter as best it can to make sure it doesn't contain nonsense. +fn validate( + event_filter: EventFilter, + type_tree: &TypeTree, +) -> (EventFilterResult, Result) { + let num_select_clauses = event_filter + .select_clauses + .as_ref() + .map(|r| r.len()) + .unwrap_or_default(); + let mut select_clause_results = Vec::with_capacity(num_select_clauses); + let mut final_select_clauses = Vec::with_capacity(num_select_clauses); + for clause in event_filter.select_clauses.into_iter().flatten() { + match validate_select_clause(clause, type_tree) { + Ok(result) => { + select_clause_results.push(StatusCode::Good); + final_select_clauses.push(result); + } + Err(e) => select_clause_results.push(e), + } + } + let (where_clause_result, parsed_where_clause) = + validate_where_clause(event_filter.where_clause, type_tree, false, false); + + ( + EventFilterResult { + select_clause_results: if select_clause_results.is_empty() { + None + } else { + Some(select_clause_results) + }, + select_clause_diagnostic_infos: None, + where_clause_result, + }, + parsed_where_clause.map(|f| ParsedEventFilter { + content_filter: f, + select_clauses: final_select_clauses, + }), + ) +} + +fn validate_select_clause( + clause: SimpleAttributeOperand, + type_tree: &TypeTree, +) -> Result { + let Some(path) = clause.browse_path else { + return Err(StatusCode::BadNodeIdUnknown); + }; + + let Ok(index_range) = clause.index_range.as_ref().parse::() else { + return 
Err(StatusCode::BadIndexRangeInvalid); + }; + + let Ok(attribute_id) = AttributeId::from_u32(clause.attribute_id) else { + return Err(StatusCode::BadAttributeIdInvalid); + }; + + // From the standard: If the SimpleAttributeOperand is used in an EventFilter + // and the typeDefinitionId is BaseEventType the Server shall evaluate the + // browsePath without considering the typeDefinitionId. + if clause.type_definition_id == ObjectTypeId::BaseEventType.into() { + // Do a simpler form of the attribute ID check in this case. + if attribute_id != AttributeId::NodeId && attribute_id != AttributeId::Value { + return Err(StatusCode::BadAttributeIdInvalid); + } + // We could in theory evaluate _every_ event type here, but that would be painful + // and potentially expensive on servers with lots of types. It also wouldn't + // be all that helpful. + return Ok(ParsedSimpleAttributeOperand { + type_definition_id: clause.type_definition_id, + browse_path: path, + attribute_id, + index_range, + }); + } + + let Some(node) = type_tree.find_type_prop_by_browse_path(&clause.type_definition_id, &path) + else { + return Err(StatusCode::BadNodeIdUnknown); + }; + + // Validate the attribute id. Per spec: + // + // The SimpleAttributeOperand allows the client to specify any attribute; however the server + // is only required to support the value attribute for variable nodes and the NodeId attribute + // for object nodes. That said, profiles defined in Part 7 may make support for + // additional attributes mandatory. + // + // So code will implement the bare minimum for now. 
+ let is_valid = match node.node_class { + NodeClass::Object => attribute_id == AttributeId::NodeId, + NodeClass::Variable => attribute_id == AttributeId::Value, + _ => false, + }; + + if !is_valid { + Err(StatusCode::BadAttributeIdInvalid) + } else { + Ok(ParsedSimpleAttributeOperand { + type_definition_id: clause.type_definition_id, + browse_path: path, + attribute_id, + index_range, + }) + } +} + +fn validate_where_clause( + where_clause: ContentFilter, + type_tree: &TypeTree, + allow_attribute_operand: bool, + allow_complex_operators: bool, +) -> (ContentFilterResult, Result) { + // The ContentFilter structure defines a collection of elements that define filtering criteria. + // Each element in the collection describes an operator and an array of operands to be used by + // the operator. The operators that can be used in a ContentFilter are described in Table 119. + // The filter is evaluated by evaluating the first entry in the element array starting with the + // first operand in the operand array. The operands of an element may contain References to + // sub-elements resulting in the evaluation continuing to the referenced elements in the element + // array. The evaluation shall not introduce loops. For example evaluation starting from element + // “A” shall never be able to return to element “A”. However there may be more than one path + // leading to another element “B”. If an element cannot be traced back to the starting element + // it is ignored. Extra operands for any operator shall result in an error. Annex B provides + // examples using the ContentFilter structure. 
+ + let Some(elements) = &where_clause.elements else { + return ( + ContentFilterResult { + element_results: None, + element_diagnostic_infos: None, + }, + Ok(ParsedContentFilter::empty()), + ); + }; + + let mut operand_refs: HashMap> = HashMap::new(); + + let element_result_pairs: Vec<( + ContentFilterElementResult, + Option, + )> = elements + .iter() + .enumerate() + .map(|(element_idx, e)| { + let Some(filter_operands) = &e.filter_operands else { + return ( + ContentFilterElementResult { + status_code: StatusCode::BadFilterOperandCountMismatch, + operand_status_codes: None, + operand_diagnostic_infos: None, + }, + None, + ); + }; + + let operand_count_mismatch = match e.filter_operator { + FilterOperator::Equals => filter_operands.len() != 2, + FilterOperator::IsNull => filter_operands.len() != 1, + FilterOperator::GreaterThan => filter_operands.len() != 2, + FilterOperator::LessThan => filter_operands.len() != 2, + FilterOperator::GreaterThanOrEqual => filter_operands.len() != 2, + FilterOperator::LessThanOrEqual => filter_operands.len() != 2, + FilterOperator::Like => filter_operands.len() != 2, + FilterOperator::Not => filter_operands.len() != 1, + FilterOperator::Between => filter_operands.len() != 3, + FilterOperator::InList => filter_operands.len() < 2, // 2..n + FilterOperator::And => filter_operands.len() != 2, + FilterOperator::Or => filter_operands.len() != 2, + FilterOperator::Cast => filter_operands.len() != 2, + FilterOperator::BitwiseAnd => filter_operands.len() != 2, + FilterOperator::BitwiseOr => filter_operands.len() != 2, + FilterOperator::InView => filter_operands.len() != 1, + FilterOperator::OfType => filter_operands.len() != 1, + FilterOperator::RelatedTo => filter_operands.len() != 6, + }; + + if !allow_complex_operators + && matches!( + e.filter_operator, + FilterOperator::InView | FilterOperator::OfType | FilterOperator::RelatedTo + ) + { + return ( + ContentFilterElementResult { + status_code: StatusCode::BadFilterOperatorUnsupported, + 
operand_status_codes: None, + operand_diagnostic_infos: None, + }, + None, + ); + } + + let mut valid_operands = Vec::with_capacity(filter_operands.len()); + let mut operand_status_codes = Vec::with_capacity(filter_operands.len()); + + let operand_results: Vec<_> = filter_operands + .iter() + .map(|e| { + let operand = ::try_from(e)?; + ParsedOperand::parse( + operand, + elements.len(), + type_tree, + allow_attribute_operand, + ) + }) + .collect(); + + for res in operand_results { + match res { + Ok(op) => { + operand_status_codes.push(StatusCode::Good); + if let ParsedOperand::ElementOperand(e) = &op { + operand_refs + .entry(element_idx) + .or_default() + .push(e.index as usize); + } + valid_operands.push(op); + } + Err(e) => operand_status_codes.push(e), + } + } + let operator_invalid = valid_operands.len() != filter_operands.len(); + + // Check what error status to return + let status_code = if operand_count_mismatch { + StatusCode::BadFilterOperandCountMismatch + } else if operator_invalid { + StatusCode::BadFilterOperandInvalid + } else { + StatusCode::Good + }; + + let res = if status_code.is_good() { + Some(ParsedContentFilterElement { + operator: e.filter_operator, + operands: valid_operands, + }) + } else { + None + }; + + ( + ContentFilterElementResult { + status_code, + operand_status_codes: Some(operand_status_codes), + operand_diagnostic_infos: None, + }, + res, + ) + }) + .collect(); + + let mut is_valid = true; + let mut valid_elements = Vec::with_capacity(elements.len()); + let mut element_results = Vec::with_capacity(elements.len()); + for (result, element) in element_result_pairs { + if let Some(element) = element { + valid_elements.push(element); + } else { + is_valid = false; + } + element_results.push(result); + } + + // Discover cycles. 
The operators must form a tree starting from the first + let mut path = HashSet::new(); + match has_cycles(&operand_refs, 0, &mut path) { + Ok(()) => (), + Err(()) => is_valid = false, + } + + ( + ContentFilterResult { + element_results: Some(element_results), + element_diagnostic_infos: None, + }, + if is_valid { + Ok(ParsedContentFilter { + elements: valid_elements, + }) + } else { + Err(StatusCode::BadEventFilterInvalid) + }, + ) +} + +fn has_cycles( + children: &HashMap>, + id: usize, + path: &mut HashSet, +) -> Result<(), ()> { + let Some(child_refs) = children.get(&id) else { + return Ok(()); + }; + if !path.insert(id) { + return Err(()); + } + + for child in child_refs { + has_cycles(children, *child, path)?; + } + + path.remove(&id); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::{ + server::{events::validation::validate_where_clause, node_manager::TypeTree}, + types::{ + AttributeId, ContentFilter, ContentFilterElement, ContentFilterResult, FilterOperator, + NodeClass, NodeId, ObjectTypeId, Operand, SimpleAttributeOperand, StatusCode, + }, + }; + + #[test] + fn test_validate_empty_where_clause() { + let type_tree = TypeTree::new(); + // check for at least one filter operand + let where_clause = ContentFilter { elements: None }; + let (result, filter) = validate_where_clause(where_clause, &type_tree, false, false); + assert_eq!( + result, + ContentFilterResult { + element_results: None, + element_diagnostic_infos: None, + } + ); + assert!(filter.is_ok()); + } + + #[test] + fn test_validate_operator_len() { + let type_tree = TypeTree::new(); + + // Make a where clause where every single operator is included but each has the wrong number of operands. 
+ // We should expect them all to be in error + let where_clause = ContentFilter { + elements: Some(vec![ + ContentFilterElement::from((FilterOperator::Equals, vec![Operand::literal(10)])), + ContentFilterElement::from((FilterOperator::IsNull, vec![])), + ContentFilterElement::from(( + FilterOperator::GreaterThan, + vec![Operand::literal(10)], + )), + ContentFilterElement::from((FilterOperator::LessThan, vec![Operand::literal(10)])), + ContentFilterElement::from(( + FilterOperator::GreaterThanOrEqual, + vec![Operand::literal(10)], + )), + ContentFilterElement::from(( + FilterOperator::LessThanOrEqual, + vec![Operand::literal(10)], + )), + ContentFilterElement::from((FilterOperator::Like, vec![Operand::literal(10)])), + ContentFilterElement::from((FilterOperator::Not, vec![])), + ContentFilterElement::from(( + FilterOperator::Between, + vec![Operand::literal(10), Operand::literal(20)], + )), + ContentFilterElement::from((FilterOperator::InList, vec![Operand::literal(10)])), + ContentFilterElement::from((FilterOperator::And, vec![Operand::literal(10)])), + ContentFilterElement::from((FilterOperator::Or, vec![Operand::literal(10)])), + ContentFilterElement::from((FilterOperator::Cast, vec![Operand::literal(10)])), + ContentFilterElement::from(( + FilterOperator::BitwiseAnd, + vec![Operand::literal(10)], + )), + ContentFilterElement::from((FilterOperator::BitwiseOr, vec![Operand::literal(10)])), + ContentFilterElement::from((FilterOperator::Like, vec![Operand::literal(10)])), + ]), + }; + // Check for less than required number of operands + let (result, filter) = validate_where_clause(where_clause, &type_tree, false, false); + result + .element_results + .unwrap() + .iter() + .for_each(|e| assert_eq!(e.status_code, StatusCode::BadFilterOperandCountMismatch)); + assert_eq!(filter.unwrap_err(), StatusCode::BadEventFilterInvalid); + } + + #[test] + fn test_validate_bad_filter_operand() { + let type_tree = TypeTree::new(); + + // check for filter operator invalid, by 
giving it a bogus extension object for an element + use crate::types::{service_types::ContentFilterElement, ExtensionObject}; + let bad_operator = ExtensionObject::null(); + let where_clause = ContentFilter { + elements: Some(vec![ContentFilterElement { + filter_operator: FilterOperator::IsNull, + filter_operands: Some(vec![bad_operator]), + }]), + }; + let (result, filter) = validate_where_clause(where_clause, &type_tree, false, false); + let element_results = result.element_results.unwrap(); + assert_eq!(element_results.len(), 1); + assert_eq!( + element_results[0].status_code, + StatusCode::BadFilterOperandInvalid + ); + let err = filter.unwrap_err(); + assert_eq!(err, StatusCode::BadEventFilterInvalid); + } + + #[test] + fn test_validate_select_operands() { + let mut type_tree = TypeTree::new(); + + type_tree.add_type_node( + &NodeId::new(1, "event"), + &ObjectTypeId::BaseEventType.into(), + NodeClass::ObjectType, + ); + type_tree.add_type_property( + &NodeId::new(1, "prop"), + &NodeId::new(1, "event"), + &[&"Prop".into()], + NodeClass::Variable, + ); + + // One attribute that exists, one that doesn't. 
+ let where_clause = ContentFilter { + elements: Some(vec![ + ContentFilterElement::from(( + FilterOperator::IsNull, + vec![Operand::SimpleAttributeOperand(SimpleAttributeOperand { + type_definition_id: NodeId::new(1, "event"), + browse_path: Some(vec!["Prop".into()]), + attribute_id: AttributeId::Value as u32, + index_range: Default::default(), + })], + )), + ContentFilterElement::from(( + FilterOperator::IsNull, + vec![Operand::SimpleAttributeOperand(SimpleAttributeOperand { + type_definition_id: NodeId::new(1, "event"), + browse_path: Some(vec!["Prop2".into()]), + attribute_id: AttributeId::Value as u32, + index_range: Default::default(), + })], + )), + ]), + }; + + let (result, filter) = validate_where_clause(where_clause, &type_tree, false, false); + let element_results = result.element_results.unwrap(); + assert_eq!(element_results.len(), 2); + assert_eq!(element_results[0].status_code, StatusCode::Good); + assert_eq!( + element_results[1].status_code, + StatusCode::BadFilterOperandInvalid + ); + let status_codes = element_results[1].operand_status_codes.as_ref().unwrap(); + assert_eq!(status_codes.len(), 1); + assert_eq!(status_codes[0], StatusCode::BadNodeIdUnknown); + assert_eq!(filter.unwrap_err(), StatusCode::BadEventFilterInvalid); + } + + #[test] + fn test_validate_circular_filter() { + let type_tree = TypeTree::new(); + + let where_clause = ContentFilter { + elements: Some(vec![ + ContentFilterElement::from(( + FilterOperator::And, + vec![Operand::element(1), Operand::element(2)], + )), + ContentFilterElement::from((FilterOperator::IsNull, vec![Operand::literal(10)])), + ContentFilterElement::from(( + FilterOperator::Or, + vec![Operand::element(1), Operand::element(3)], + )), + ContentFilterElement::from((FilterOperator::Not, vec![Operand::element(0)])), + ]), + }; + + let (_result, filter) = validate_where_clause(where_clause, &type_tree, false, false); + assert_eq!(filter.unwrap_err(), StatusCode::BadEventFilterInvalid); + } +} diff --git 
a/lib/src/server/historical/mod.rs b/lib/src/server/historical/mod.rs deleted file mode 100644 index 67518a4f5..000000000 --- a/lib/src/server/historical/mod.rs +++ /dev/null @@ -1,159 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{result::Result, sync::Arc}; - -use crate::sync::*; -use crate::types::status_code::StatusCode; -use crate::types::*; - -use crate::server::address_space::AddressSpace; - -/// Values that should be set in the address space via `AddressSpace::set_history_server_capabilities()` -/// to denote to clients what history capabilities the server has. -pub struct HistoryServerCapabilities { - pub access_history_data: bool, - pub access_history_events: bool, - pub max_return_data: u32, - pub max_return_events: u32, - pub insert_data: bool, - pub replace_data: bool, - pub update_data: bool, - pub delete_raw: bool, - pub delete_at_time: bool, - pub insert_event: bool, - pub replace_event: bool, - pub update_event: bool, - pub delete_event: bool, - pub insert_annotation: bool, -} - -/// The `HistoricalEventProvider` trait provides the function stubs that a server will call -/// to process historical event operations. The implementor of this trait may provide their -///// own implementation as many functions as they desire leaving the remainder as stubs. -/// -/// IMPORTANT NOTE: This trait is currently synchronous and may change in the future to some other -/// form. In the meantime it means if you are doing lengthy reads then use continuation points -/// to spawn a thread for that activity. Updates and deletes should be spawned on separate threads -/// if they are lengthy operations. 
-pub trait HistoricalEventProvider { - fn read_event_details( - &self, - _address_space: Arc>, - _request: ReadEventDetails, - _timestamps_to_return: TimestampsToReturn, - _release_continuation_points: bool, - _nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - info!("Unimplemented read_event_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn update_event_details( - &self, - _address_space: Arc>, - _request: UpdateEventDetails, - ) -> Result, StatusCode> { - info!("Unimplemented update_event_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn delete_event_details( - &self, - _address_space: Arc>, - _request: DeleteEventDetails, - ) -> Result, StatusCode> { - info!("Unimplemented delete_event_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } -} - -pub enum HistoryRawData { - HistoryData(HistoryData), - HistoryModifiedData(HistoryModifiedData), -} - -/// The `HistoricalEventProvider` trait provides the function stubs that a server will call -/// to process historical data operations. The implementor of this trait may provide their -/// own implementation as many functions as they desire leaving the remainder as stubs. -/// -/// IMPORTANT NOTE: This trait is currently synchronous and may change in the future to some other -/// form. In the meantime it means if you are doing lengthy reads then use continuation points -/// to spawn a thread for that activity. Updates and deletes should be spawned on separate threads -/// if they are lengthy operations. -pub trait HistoricalDataProvider { - /// Note: Function returns an `HistoryRawData` enum containing *either* a `HistoryData` for a read raw action - /// or a `HistoryModifiedData` for a read modified action. 
- fn read_raw_modified_details( - &self, - _address_space: Arc>, - _request: ReadRawModifiedDetails, - _timestamps_to_return: TimestampsToReturn, - _release_continuation_points: bool, - _nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - info!("Unimplemented read_raw_modified_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn read_processed_details( - &self, - _address_space: Arc>, - _request: ReadProcessedDetails, - _timestamps_to_return: TimestampsToReturn, - _release_continuation_points: bool, - _nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - info!("Unimplemented read_processed_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn read_at_time_details( - &self, - _address_space: Arc>, - _request: ReadAtTimeDetails, - _timestamps_to_return: TimestampsToReturn, - _release_continuation_points: bool, - _nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - info!("Unimplemented read_at_time_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn update_data_details( - &self, - _address_space: Arc>, - _request: UpdateDataDetails, - ) -> Result, StatusCode> { - info!("Unimplemented update_data_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn update_structure_data_details( - &self, - _address_space: Arc>, - _request: UpdateStructureDataDetails, - ) -> Result, StatusCode> { - info!("Unimplemented update_structure_data_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn delete_raw_modified_details( - &self, - _address_space: Arc>, - _request: DeleteRawModifiedDetails, - ) -> Result, StatusCode> { - info!("Unimplemented delete_raw_modified_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } - - fn delete_at_time_details( - &self, - _address_space: Arc>, - _request: DeleteAtTimeDetails, - ) -> Result, StatusCode> { - info!("Unimplemented delete_at_time_details"); - 
Err(StatusCode::BadHistoryOperationUnsupported) - } -} diff --git a/lib/src/server/html/index.html b/lib/src/server/html/index.html deleted file mode 100644 index b8b500c69..000000000 --- a/lib/src/server/html/index.html +++ /dev/null @@ -1,377 +0,0 @@ - - - - - Server Diagnostics - - - - - - - - - -
Please wait...
- - - - - diff --git a/lib/src/server/http/mod.rs b/lib/src/server/http/mod.rs deleted file mode 100644 index dc7802479..000000000 --- a/lib/src/server/http/mod.rs +++ /dev/null @@ -1,122 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{path::PathBuf, sync::Arc, thread}; - -use actix_files as fs; -use actix_web::{web, App, HttpResponse, HttpServer, Responder, Result}; -use tokio::runtime::Runtime; - -use crate::sync::*; - -use crate::server::{metrics::ServerMetrics, server::Connections, state::ServerState}; - -/// This is our metrics service, the thing called to handle requests coming from hyper -#[derive(Clone)] -struct AppState { - server_state: Arc>, - connections: Arc>, - server_metrics: Arc>, - base_path: Arc>, -} - -async fn index(data: web::Data) -> Result { - let base_path = data.base_path.read(); - let mut index_path = base_path.clone(); - index_path.push("index.html"); - debug!("Resolving index.html to location {}", index_path.display()); - Ok(fs::NamedFile::open(index_path)?) -} - -async fn abort(data: web::Data) -> impl Responder { - if cfg!(debug_assertions) { - // Abort the server from the command - let mut server_state = data.server_state.write(); - server_state.abort(); - HttpResponse::Ok().content_type("text/plain").body("OK") - } else { - // Abort is only enabled in debug mode - HttpResponse::Ok() - .content_type("text/plain") - .body("NOT IMPLEMENTED") - } -} - -async fn metrics(data: web::Data) -> impl Responder { - use std::ops::Deref; - - // Send metrics data as json - let json = { - // Careful with the ordering here to avoid potential deadlock. Metrics are locked - // several times in scope to avoid deadlocks issues. 
- { - let server_state = data.server_state.read(); - let mut server_metrics = data.server_metrics.write(); - server_metrics.update_from_server_state(&server_state); - } - - // Take a copy of connections - let connections = { - let connections = data.connections.read(); - connections.clone() - }; - let mut server_metrics = data.server_metrics.write(); - server_metrics.update_from_connections(connections); - serde_json::to_string_pretty(server_metrics.deref()).unwrap() - }; - - HttpResponse::Ok() - .content_type("application/json") - .body(json) -} - -/// Runs an http server on the specified binding address, serving out the supplied server metrics -pub fn run_http_server( - runtime: &Runtime, - address: &str, - content_path: &str, - server_state: Arc>, - connections: Arc>, - server_metrics: Arc>, -) { - let address = String::from(address); - let base_path = PathBuf::from(content_path); - let server_state_http = server_state.clone(); - - // Getting this working was very painful since Actix HttpServer does not implement Send trait, so the - // code has to run on a single thread, but also async and through Tokio. 
- - let runtime_handle = runtime.handle().clone(); - thread::spawn(move || { - info!( - "HTTP server is running on http://{}/ to provide OPC UA server metrics", - address - ); - - let local = tokio::task::LocalSet::new(); - local.spawn_local(async move { - // Spawns a new HTTP server - if let Ok(server) = HttpServer::new(move || { - App::new() - .app_data(web::Data::new(AppState { - server_state: server_state_http.clone(), - connections: connections.clone(), - server_metrics: server_metrics.clone(), - base_path: Arc::new(RwLock::new(base_path.clone())), - })) - .route("/server/metrics", web::get().to(metrics)) - .route("/server/abort", web::get().to(abort)) - .route("/", web::get().to(index)) - }) - .bind(&address) - { - let _ = server.run().await; - } else { - error!("Could not start HTTP server"); - } - }); - runtime_handle.block_on(local); - debug!("HTTP server has terminated"); - }); -} diff --git a/lib/src/server/identity_token.rs b/lib/src/server/identity_token.rs index 1348dd865..5cb19812a 100644 --- a/lib/src/server/identity_token.rs +++ b/lib/src/server/identity_token.rs @@ -10,6 +10,7 @@ pub(crate) const POLICY_ID_USER_PASS_RSA_15: &str = "userpass_rsa_15"; pub(crate) const POLICY_ID_USER_PASS_RSA_OAEP: &str = "userpass_rsa_oaep"; pub(crate) const POLICY_ID_X509: &str = "x509"; +/// Identity token representation on the server, decoded from the client. pub enum IdentityToken { None, AnonymousIdentityToken(AnonymousIdentityToken), @@ -19,6 +20,8 @@ pub enum IdentityToken { } impl IdentityToken { + /// Decode an identity token from an extension object received from the client. + /// Returns `Invalid` if decoding failed. 
pub fn new(o: &ExtensionObject, decoding_options: &DecodingOptions) -> Self { if o.is_empty() { // Treat as anonymous diff --git a/lib/src/server/state.rs b/lib/src/server/info.rs similarity index 59% rename from lib/src/server/state.rs rename to lib/src/server/info.rs index 0f63b8070..11a403204 100644 --- a/lib/src/server/state.rs +++ b/lib/src/server/info.rs @@ -4,11 +4,16 @@ //! Provides server state information, such as status, configuration, running servers and so on. +use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; use std::sync::Arc; -use crate::core::prelude::*; +use arc_swap::ArcSwap; + +use crate::core::comms::url::{hostname_from_url, url_matches_except_host}; +use crate::core::handle::AtomicHandle; use crate::crypto::{user_identity, PrivateKey, SecurityPolicy, X509}; -use crate::sync::*; +use crate::server::authenticator::Password; +use crate::sync::RwLock; use crate::types::{ profiles, service_types::{ @@ -18,123 +23,72 @@ use crate::types::{ }, status_code::StatusCode, }; - -use crate::server::{ - callbacks::{RegisterNodes, UnregisterNodes}, - config::{ServerConfig, ServerEndpoint}, - constants, - diagnostics::ServerDiagnostics, - events::{ - audit::{AuditEvent, AuditLog}, - event::Event, - }, - historical::{HistoricalDataProvider, HistoricalEventProvider}, - identity_token::{ - IdentityToken, POLICY_ID_ANONYMOUS, POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, - POLICY_ID_USER_PASS_RSA_OAEP, POLICY_ID_X509, - }, +use crate::types::{ + AttributeId, ByteString, DataValue, DateTime, DecodingOptions, ExtensionObject, LocalizedText, + MessageSecurityMode, UAString, VariableId, }; -pub(crate) struct OperationalLimits { - pub max_nodes_per_translate_browse_paths_to_node_ids: usize, - pub max_nodes_per_read: usize, - pub max_nodes_per_write: usize, - pub max_nodes_per_method_call: usize, - pub max_nodes_per_browse: usize, - pub max_nodes_per_register_nodes: usize, - pub max_nodes_per_node_management: usize, - pub max_monitored_items_per_call: 
usize, - pub max_nodes_per_history_read_data: usize, - pub max_nodes_per_history_read_events: usize, - pub max_nodes_per_history_update_data: usize, - pub max_nodes_per_history_update_events: usize, -} +use crate::server::config::{ServerConfig, ServerEndpoint}; -impl Default for OperationalLimits { - fn default() -> Self { - Self { - max_nodes_per_translate_browse_paths_to_node_ids: - constants::MAX_NODES_PER_TRANSLATE_BROWSE_PATHS_TO_NODE_IDS, - max_nodes_per_read: constants::MAX_NODES_PER_READ, - max_nodes_per_write: constants::MAX_NODES_PER_WRITE, - max_nodes_per_method_call: constants::MAX_NODES_PER_METHOD_CALL, - max_nodes_per_browse: constants::MAX_NODES_PER_BROWSE, - max_nodes_per_register_nodes: constants::MAX_NODES_PER_REGISTER_NODES, - max_nodes_per_node_management: constants::MAX_NODES_PER_NODE_MANAGEMENT, - max_monitored_items_per_call: constants::MAX_MONITORED_ITEMS_PER_CALL, - max_nodes_per_history_read_data: constants::MAX_NODES_PER_HISTORY_READ_DATA, - max_nodes_per_history_read_events: constants::MAX_NODES_PER_HISTORY_READ_EVENTS, - max_nodes_per_history_update_data: constants::MAX_NODES_PER_HISTORY_UPDATE_DATA, - max_nodes_per_history_update_events: constants::MAX_NODES_PER_HISTORY_UPDATE_EVENTS, - } - } -} +use super::authenticator::{AuthManager, UserToken}; +use super::identity_token::{ + IdentityToken, POLICY_ID_ANONYMOUS, POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, + POLICY_ID_USER_PASS_RSA_OAEP, POLICY_ID_X509, +}; +use super::node_manager::TypeTree; +use super::{OperationalLimits, ServerCapabilities, SubscriptionCache, ANONYMOUS_USER_TOKEN_ID}; -/// Server state is any state associated with the server as a whole that individual sessions might -/// be interested in. That includes configuration info etc. -pub struct ServerState { +/// Server state is any configuration associated with the server as a whole that individual sessions might +/// be interested in. 
+pub struct ServerInfo { /// The application URI pub application_uri: UAString, /// The product URI pub product_uri: UAString, /// The application name pub application_name: LocalizedText, - /// The protocol, hostname and port formatted as a url, but less the path - pub base_endpoint: String, /// The time the server started - pub start_time: DateTime, + pub start_time: ArcSwap, /// The list of servers (by urn) pub servers: Vec, /// Server configuration - pub config: Arc>, + pub config: Arc, /// Server public certificate read from config location or null if there is none pub server_certificate: Option, /// Server private key pub server_pkey: Option, - /// The next subscription id - subscriptions are shared across the whole server. Initial value - /// is a random u32. - pub last_subscription_id: u32, - /// Maximum number of subscriptions per session, 0 means no limit (danger) - pub max_subscriptions: usize, - /// Maximum number of monitored items per subscription, 0 means no limit (danger) - pub max_monitored_items_per_sub: usize, - /// Maximum number of queued values in a monitored item, 0 means no limit (danger) - pub max_monitored_item_queue_size: usize, - /// Minimum publishing interval (in millis) - pub min_publishing_interval_ms: Duration, - /// Minimum sampling interval (in millis) - pub min_sampling_interval_ms: Duration, - /// Default keep alive count - pub default_keep_alive_count: u32, - /// Maxmimum keep alive count - pub max_keep_alive_count: u32, - /// Maximum lifetime count (3 times as large as max keep alive) - pub max_lifetime_count: u32, /// Operational limits pub(crate) operational_limits: OperationalLimits, /// Current state - pub state: ServerStateType, - /// Sets the abort flag that terminates the associated server - pub abort: bool, + pub state: ArcSwap, /// Audit log - pub(crate) audit_log: Arc>, + // pub(crate) audit_log: Arc>, /// Diagnostic information - pub(crate) diagnostics: Arc>, - /// Callback for register nodes - pub(crate) 
register_nodes_callback: Option>, - /// Callback for unregister nodes - pub(crate) unregister_nodes_callback: Option>, - /// Callback for historical data - pub(crate) historical_data_provider: Option>, - /// Callback for historical events - pub(crate) historical_event_provider: Option>, + // pub(crate) diagnostics: Arc>, /// Size of the send buffer in bytes pub send_buffer_size: usize, /// Size of the receive buffer in bytes pub receive_buffer_size: usize, + /// Authenticator to use when verifying user identities, and checking for user access. + pub authenticator: Arc, + /// Structure containing type metadata shared by the entire server. + pub type_tree: Arc>, + /// Generator for subscription IDs. + pub subscription_id_handle: AtomicHandle, + /// Generator for monitored item IDs. + pub monitored_item_id_handle: AtomicHandle, + /// Generator for secure channel IDs. + pub secure_channel_id_handle: Arc, + /// Server capabilities + pub capabilities: ServerCapabilities, + /// Service level observer. + pub service_level: Arc, + /// Currently active local port. + pub port: AtomicU16, } -impl ServerState { +impl ServerInfo { + /// Get the list of endpoints that match the provided filters. 
pub fn endpoints( &self, endpoint_url: &UAString, @@ -162,15 +116,15 @@ impl ServerState { } } - let config = trace_read_lock!(self.config); if let Ok(hostname) = hostname_from_url(endpoint_url.as_ref()) { - if !hostname.eq_ignore_ascii_case(&config.tcp_config.host) { - debug!("Endpoint url \"{}\" hostname supplied by caller does not match server's hostname \"{}\"", endpoint_url, &config.tcp_config.host); + if !hostname.eq_ignore_ascii_case(&self.config.tcp_config.host) { + debug!("Endpoint url \"{}\" hostname supplied by caller does not match server's hostname \"{}\"", endpoint_url, &self.config.tcp_config.host); } - let endpoints = config + let endpoints = self + .config .endpoints .iter() - .map(|(_, e)| self.new_endpoint_description(&config, e, true)) + .map(|(_, e)| self.new_endpoint_description(e, true)) .collect(); Some(endpoints) } else { @@ -178,23 +132,29 @@ impl ServerState { "Endpoint url \"{}\" is unrecognized, using default", endpoint_url ); - if let Some(e) = config.default_endpoint() { - Some(vec![self.new_endpoint_description(&config, e, true)]) + if let Some(e) = self.config.default_endpoint() { + Some(vec![self.new_endpoint_description(e, true)]) } else { Some(vec![]) } } } + /// Check if the endpoint given by `endpoint_url`, `security_policy`, and `security_mode` + /// exists on the server. 
pub fn endpoint_exists( &self, endpoint_url: &str, security_policy: SecurityPolicy, security_mode: MessageSecurityMode, ) -> bool { - let config = trace_read_lock!(self.config); - config - .find_endpoint(endpoint_url, security_policy, security_mode) + self.config + .find_endpoint( + endpoint_url, + &self.base_endpoint(), + security_policy, + security_mode, + ) .is_some() } @@ -206,16 +166,16 @@ impl ServerState { endpoint_url: &str, ) -> Option> { debug!("find_endpoint, url = {}", endpoint_url); - let config = trace_read_lock!(self.config); - let base_endpoint_url = config.base_endpoint_url(); - let endpoints: Vec = config + let base_endpoint_url = self.base_endpoint(); + let endpoints: Vec = self + .config .endpoints .iter() .filter(|&(_, e)| { // Test end point's security_policy_uri and matching url url_matches_except_host(&e.endpoint_url(&base_endpoint_url), endpoint_url) }) - .map(|(_, e)| self.new_endpoint_description(&config, e, false)) + .map(|(_, e)| self.new_endpoint_description(e, false)) .collect(); if endpoints.is_empty() { None @@ -249,11 +209,7 @@ impl ServerState { UAString::null() } - fn user_identity_tokens( - &self, - config: &ServerConfig, - endpoint: &ServerEndpoint, - ) -> Vec { + fn user_identity_tokens(&self, endpoint: &ServerEndpoint) -> Vec { let mut user_identity_tokens = Vec::with_capacity(3); // Anonymous policy @@ -267,7 +223,7 @@ impl ServerState { }); } // User pass policy - if endpoint.supports_user_pass(&config.user_tokens) { + if endpoint.supports_user_pass(&self.config.user_tokens) { // The endpoint may set a password security policy user_identity_tokens.push(UserTokenPolicy { policy_id: Self::user_pass_security_policy_id(endpoint), @@ -278,7 +234,7 @@ impl ServerState { }); } // X509 policy - if endpoint.supports_x509(&config.user_tokens) { + if endpoint.supports_x509(&self.config.user_tokens) { user_identity_tokens.push(UserTokenPolicy { policy_id: UAString::from(POLICY_ID_X509), token_type: UserTokenType::Certificate, @@ 
-301,13 +257,12 @@ impl ServerState { /// Constructs a new endpoint description using the server's info and that in an Endpoint fn new_endpoint_description( &self, - config: &ServerConfig, endpoint: &ServerEndpoint, all_fields: bool, ) -> EndpointDescription { - let base_endpoint_url = config.base_endpoint_url(); + let base_endpoint_url = self.base_endpoint(); - let user_identity_tokens = self.user_identity_tokens(config, endpoint); + let user_identity_tokens = self.user_identity_tokens(endpoint); // CreateSession doesn't need all the endpoint description // and docs say not to bother sending the server and server @@ -352,45 +307,65 @@ impl ServerState { } } + /// Get the list of discovery URLs on the server. pub fn discovery_urls(&self) -> Option> { - let config = trace_read_lock!(self.config); - if config.discovery_urls.is_empty() { + if self.config.discovery_urls.is_empty() { None } else { - Some(config.discovery_urls.iter().map(UAString::from).collect()) + Some( + self.config + .discovery_urls + .iter() + .map(UAString::from) + .collect(), + ) } } + /// Get the application type, will be `Server`. pub fn application_type(&self) -> ApplicationType { ApplicationType::Server } + /// Get the gateway server URI. pub fn gateway_server_uri(&self) -> UAString { UAString::null() } - pub fn abort(&mut self) { - info!("Server has been told to abort"); - self.abort = true; - self.state = ServerStateType::Shutdown; - } - + /// Get the current server state. pub fn state(&self) -> ServerStateType { - self.state + **self.state.load() } - pub fn set_state(&mut self, state: ServerStateType) { - self.state = state; + /// Set the server state. Note that this does not actually affect the server, it is purely + /// informative. 
+ pub(crate) fn set_state(&self, state: ServerStateType, subs: &SubscriptionCache) { + self.state.store(Arc::new(state)); + subs.notify_data_change( + [( + DataValue::new_now(state as i32), + &VariableId::Server_ServerStatus.into(), + AttributeId::Value, + )] + .into_iter(), + ); } - pub fn is_abort(&self) -> bool { - self.abort + /// Check if the server state indicates the server is running. + pub fn is_running(&self) -> bool { + self.state() == ServerStateType::Running } - pub fn is_running(&self) -> bool { - self.state == ServerStateType::Running + /// Get the base endpoint, i.e. the configured host + current port. + pub fn base_endpoint(&self) -> String { + format!( + "opc.tcp://{}:{}", + self.config.tcp_config.host, + self.port.load(Ordering::Relaxed) + ) } + /// Get the server certificate as a byte string. pub fn server_certificate_as_byte_string(&self) -> ByteString { if let Some(ref server_certificate) = self.server_certificate { server_certificate.as_byte_string() @@ -399,6 +374,7 @@ impl ServerState { } } + /// Get a representation of this server as a `RegisteredServer` object. pub fn registered_server(&self) -> RegisteredServer { let server_uri = self.application_uri.clone(); let product_uri = self.product_uri.clone(); @@ -420,18 +396,13 @@ impl ServerState { } } - pub fn create_subscription_id(&mut self) -> u32 { - self.last_subscription_id += 1; - self.last_subscription_id - } - /// Authenticates access to an endpoint. The endpoint is described by its path, policy, mode and /// the token is supplied in an extension object that must be extracted and authenticated. /// /// It is possible that the endpoint does not exist, or that the token is invalid / unsupported /// or that the token cannot be used with the end point. The return codes reflect the responses /// that ActivateSession would expect from a service call. 
- pub fn authenticate_endpoint( + pub async fn authenticate_endpoint( &self, request: &ActivateSessionRequest, endpoint_url: &str, @@ -439,11 +410,14 @@ impl ServerState { security_mode: MessageSecurityMode, user_identity_token: &ExtensionObject, server_nonce: &ByteString, - ) -> Result { + ) -> Result { // Get security from endpoint url - let config = trace_read_lock!(self.config); - - if let Some(endpoint) = config.find_endpoint(endpoint_url, security_policy, security_mode) { + if let Some(endpoint) = self.config.find_endpoint( + endpoint_url, + &self.base_endpoint(), + security_policy, + security_mode, + ) { // Now validate the user identity token match IdentityToken::new(user_identity_token, &self.decoding_options()) { IdentityToken::None => { @@ -451,24 +425,27 @@ impl ServerState { Err(StatusCode::BadIdentityTokenInvalid) } IdentityToken::AnonymousIdentityToken(token) => { - Self::authenticate_anonymous_token(endpoint, &token) + self.authenticate_anonymous_token(endpoint, &token).await } - IdentityToken::UserNameIdentityToken(token) => self - .authenticate_username_identity_token( - &config, + IdentityToken::UserNameIdentityToken(token) => { + self.authenticate_username_identity_token( endpoint, &token, &self.server_pkey, server_nonce, - ), - IdentityToken::X509IdentityToken(token) => self.authenticate_x509_identity_token( - &config, - endpoint, - &token, - &request.user_token_signature, - &self.server_certificate, - server_nonce, - ), + ) + .await + } + IdentityToken::X509IdentityToken(token) => { + self.authenticate_x509_identity_token( + endpoint, + &token, + &request.user_token_signature, + &self.server_certificate, + server_nonce, + ) + .await + } IdentityToken::Invalid(o) => { error!("User identity token type {:?} is unsupported", o.node_id); Err(StatusCode::BadIdentityTokenInvalid) @@ -480,52 +457,38 @@ impl ServerState { } } - pub fn set_register_nodes_callbacks( - &mut self, - register_nodes_callback: Box, - unregister_nodes_callback: Box, - ) { - 
self.register_nodes_callback = Some(register_nodes_callback); - self.unregister_nodes_callback = Some(unregister_nodes_callback); - } - /// Returns the decoding options of the server pub fn decoding_options(&self) -> DecodingOptions { - let config = trace_read_lock!(self.config); - config.decoding_options() + self.config.decoding_options() } /// Authenticates an anonymous token, i.e. does the endpoint support anonymous access or not - fn authenticate_anonymous_token( + async fn authenticate_anonymous_token( + &self, endpoint: &ServerEndpoint, token: &AnonymousIdentityToken, - ) -> Result { + ) -> Result { if token.policy_id.as_ref() != POLICY_ID_ANONYMOUS { error!("Token doesn't possess the correct policy id"); - Err(StatusCode::BadIdentityTokenInvalid) - } else if !endpoint.supports_anonymous() { - error!( - "Endpoint \"{}\" does not support anonymous authentication", - endpoint.path - ); - Err(StatusCode::BadIdentityTokenRejected) - } else { - debug!("Anonymous identity is authenticated"); - Ok(String::from(crate::server::config::ANONYMOUS_USER_TOKEN_ID)) + return Err(StatusCode::BadIdentityTokenInvalid); } + self.authenticator + .authenticate_anonymous_token(endpoint) + .await?; + + Ok(UserToken(ANONYMOUS_USER_TOKEN_ID.to_string())) } /// Authenticates the username identity token with the supplied endpoint. The function returns the user token identifier /// that matches the identity token. 
- fn authenticate_username_identity_token( + async fn authenticate_username_identity_token( &self, - config: &ServerConfig, endpoint: &ServerEndpoint, token: &UserNameIdentityToken, server_key: &Option, server_nonce: &ByteString, - ) -> Result { - if !endpoint.supports_user_pass(&config.user_tokens) { + ) -> Result { + if !endpoint.supports_user_pass(&self.config.user_tokens) { error!("Endpoint doesn't support username password tokens"); Err(StatusCode::BadIdentityTokenRejected) } else if token.policy_id != Self::user_pass_security_policy_id(endpoint) { @@ -555,64 +518,37 @@ impl ServerState { token.plaintext_password()? }; - // Iterate ids in endpoint - for user_token_id in &endpoint.user_token_ids { - if let Some(server_user_token) = config.user_tokens.get(user_token_id) { - if server_user_token.is_user_pass() - && server_user_token.user == token.user_name.as_ref() - { - // test for empty password - let valid = if server_user_token.pass.is_none() { - // Empty password for user - token_password.is_empty() - } else { - // Password compared as UTF-8 bytes - let server_password = - server_user_token.pass.as_ref().unwrap().as_bytes(); - server_password == token_password.as_bytes() - }; - if !valid { - error!( - "Cannot authenticate \"{}\", password is invalid", - server_user_token.user - ); - return Err(StatusCode::BadUserAccessDenied); - } else { - return Ok(user_token_id.clone()); - } - } - } - } - error!( - "Cannot authenticate \"{}\", user not found for endpoint", - token.user_name - ); - Err(StatusCode::BadUserAccessDenied) + self.authenticator + .authenticate_username_identity_token( + endpoint, + token.user_name.as_ref(), + &Password::new(token_password), + ) + .await } } /// Authenticate the x509 token against the endpoint. The function returns the user token identifier /// that matches the identity token. 
- fn authenticate_x509_identity_token( + async fn authenticate_x509_identity_token( &self, - config: &ServerConfig, endpoint: &ServerEndpoint, token: &X509IdentityToken, user_token_signature: &SignatureData, server_certificate: &Option, server_nonce: &ByteString, - ) -> Result { - if !endpoint.supports_x509(&config.user_tokens) { + ) -> Result { + if !endpoint.supports_x509(&self.config.user_tokens) { error!("Endpoint doesn't support x509 tokens"); Err(StatusCode::BadIdentityTokenRejected) } else if token.policy_id.as_ref() != POLICY_ID_X509 { error!("Token doesn't possess the correct policy id"); Err(StatusCode::BadIdentityTokenRejected) } else { - let result = match server_certificate { + match server_certificate { Some(ref server_certificate) => { // Find the security policy used for verifying tokens - let user_identity_tokens = self.user_identity_tokens(config, endpoint); + let user_identity_tokens = self.user_identity_tokens(endpoint); let security_policy = user_identity_tokens .iter() .find(|t| t.token_type == UserTokenType::Certificate) @@ -637,45 +573,23 @@ impl ServerState { } } None => Err(StatusCode::BadIdentityTokenInvalid), - }; - result.and_then(|_| { - // Check the endpoint to see if this token is supported - let signing_cert = X509::from_byte_string(&token.certificate_data)?; - let signing_thumbprint = signing_cert.thumbprint(); - for user_token_id in &endpoint.user_token_ids { - if let Some(server_user_token) = config.user_tokens.get(user_token_id) { - if let Some(ref user_thumbprint) = server_user_token.thumbprint { - // The signing cert matches a user's identity, so it is valid - if *user_thumbprint == signing_thumbprint { - return Ok(user_token_id.clone()); - } - } - } - } - Err(StatusCode::BadIdentityTokenInvalid) - }) - } - } + }?; - pub fn set_historical_data_provider( - &mut self, - historical_data_provider: Box, - ) { - self.historical_data_provider = Some(historical_data_provider); - } + // Check the endpoint to see if this token is 
supported + let signing_cert = X509::from_byte_string(&token.certificate_data)?; + let signing_thumbprint = signing_cert.thumbprint(); - pub fn set_historical_event_provider( - &mut self, - historical_event_provider: Box, - ) { - self.historical_event_provider = Some(historical_event_provider); + self.authenticator + .authenticate_x509_identity_token(endpoint, &signing_thumbprint) + .await + } } - pub(crate) fn raise_and_log(&self, event: T) -> Result + /* pub(crate) fn raise_and_log(&self, event: T) -> Result where T: AuditEvent + Event, { let audit_log = trace_write_lock!(self.audit_log); audit_log.raise_and_log(event) - } + } */ } diff --git a/lib/src/server/metrics.rs b/lib/src/server/metrics.rs deleted file mode 100644 index 1009a962c..000000000 --- a/lib/src/server/metrics.rs +++ /dev/null @@ -1,181 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides debug metric of server state that can be used by anything that wants -//! to see what is happening in the server. State is updated by the server as sessions are added, removed, -//! and when subscriptions / monitored items are added, removed. 
- -use crate::runtime_components; -use crate::types::DateTime; - -use crate::server::{ - comms::transport::{Transport, TransportState}, - config, - diagnostics::ServerDiagnostics, - server, - state::ServerState, - subscriptions::subscriptions, -}; - -#[derive(Serialize)] -pub struct ServerMetrics { - pub server: Server, - pub diagnostics: ServerDiagnostics, - pub config: Option, - pub connections: Vec, - pub runtime_components: Vec, -} - -#[derive(Serialize)] -pub struct Server { - pub start_time: String, - pub uptime_ms: i64, -} - -#[derive(Serialize)] -pub struct Connection { - pub sessions: Vec, - // creation time - // state - pub client_address: String, - pub transport_state: String, -} - -#[derive(Serialize)] -pub struct Session { - pub id: String, - pub session_activated: bool, - pub session_terminated: bool, - pub session_terminated_at: String, - pub subscriptions: subscriptions::Metrics, -} - -impl Default for ServerMetrics { - fn default() -> Self { - // Sample metrics - Self { - server: Server { - start_time: String::new(), - uptime_ms: 0, - }, - diagnostics: ServerDiagnostics::default(), - config: None, - connections: Vec::new(), - runtime_components: Vec::new(), - } - } -} - -impl ServerMetrics { - pub fn new() -> ServerMetrics { - Self::default() - } - - pub fn set_server_info(&mut self, server: &server::Server) { - let server_state = server.server_state(); - let config = { - let server_state = trace_read_lock!(server_state); - server_state.config.clone() - }; - let mut config = { - let config = trace_read_lock!(config); - config.clone() - }; - // For security, blank out user tokens - config.user_tokens.clear(); - config.user_tokens.insert( - String::new(), - config::ServerUserToken { - user: String::from("User identity tokens have been removed"), - pass: None, - x509: None, - thumbprint: None, - }, - ); - self.config = Some(config); - } - - // Update the server state metrics (uptime etc.) 
- pub fn update_from_server_state(&mut self, server_state: &ServerState) { - let start_time = &server_state.start_time; - let now = DateTime::now(); - - self.server.start_time = start_time.as_chrono().to_rfc3339(); - - // Take a snapshot of the diagnostics - { - let diagnostics = trace_read_lock!(server_state.diagnostics); - self.diagnostics = diagnostics.clone(); - } - - let elapsed = now - .as_chrono() - .signed_duration_since(start_time.as_chrono()); - self.server.uptime_ms = elapsed.num_milliseconds(); - } - - // Update the connection metrics which includes susbcriptions and monitored items - pub fn update_from_connections(&mut self, connections: server::Connections) { - self.runtime_components = runtime_components!(); - self.connections = connections - .iter() - .map(|c| { - // Carefully extract info while minimizing chance of deadlock - let (client_address, transport_state, session_manager) = { - let connection = trace_read_lock!(c); - let client_address = - if let Some(ref client_address) = connection.client_address() { - format!("{:?}", client_address) - } else { - String::new() - }; - let transport_state = match connection.state() { - TransportState::New => "New".to_string(), - TransportState::WaitingHello => "WaitingHello".to_string(), - TransportState::ProcessMessages => "ProcessMessages".to_string(), - TransportState::Finished(status_code) => { - format!("Finished({})", status_code) - } - }; - ( - client_address, - transport_state, - connection.session_manager(), - ) - }; - let session_manager = trace_read_lock!(session_manager); - let sessions = session_manager - .sessions - .iter() - .map(|(_, session)| { - let session = trace_read_lock!(session); - let id = session.session_id().to_string(); - let session_activated = session.is_activated(); - let session_terminated = session.is_terminated(); - let session_terminated_at = if session.is_terminated() { - session.terminated_at().to_rfc3339() - } else { - String::new() - }; - let subscriptions = 
session.subscriptions().metrics(); - Session { - id, - session_activated, - session_terminated, - session_terminated_at, - subscriptions, - } - }) - .collect(); - - // session.subscriptions.iterate ... - Connection { - client_address, - transport_state, - sessions, - } - }) - .collect(); - } -} diff --git a/lib/src/server/mod.rs b/lib/src/server/mod.rs index dd35447c3..989c16937 100644 --- a/lib/src/server/mod.rs +++ b/lib/src/server/mod.rs @@ -1,99 +1,36 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! The OPC UA Server module contains the server side functionality - address space, services, -//! server security, session management, local discovery server registration and subscriptions. -//! -//! # Usage -//! -//! An implementation will usually start by building a [`ServerConfig`], either -//! from a configuration file, or through code. Then it will construct a [`Server`], initialise -//! its address space, and then run it. -//! -//! [`Server`]: ./server/struct.Server.html -//! [`ServerConfig`]: ./config/struct.ServerConfig.html -//! -//! # Example -//! -//! This is a very simple server which runs with the default address space on the default port. -//! -//! ```no_run -//! use opcua::server::prelude::*; -//! -//! fn main() { -//! let server: Server = ServerBuilder::new_sample().server().unwrap(); -//! server.run(); -//! } -//! ``` - -/// Returns true of the Option> is None or the vec inside is empty. This is particularly -/// used by services where the spec says "All Services with arrays of operations in the request -/// shall return a bad code in the serviceResult if the array is empty." -macro_rules! 
is_empty_option_vec { - ( $v: expr ) => { - $v.is_none() || $v.as_ref().unwrap().is_empty() - }; -} - -mod identity_token; -mod services; - -#[cfg(feature = "discovery-server-registration")] -mod discovery; - -mod session_diagnostics; - -#[cfg(feature = "http")] -pub mod http; - pub mod address_space; -pub mod builder; -pub mod callbacks; -pub mod comms; -pub mod config; -pub mod continuation_point; -pub mod diagnostics; -#[macro_use] -pub mod events; -pub mod historical; -pub mod metrics; -pub mod server; -pub mod session; -pub mod state; -pub mod subscriptions; -pub mod util; +pub mod authenticator; +mod builder; +mod config; +mod discovery; +mod events; +mod identity_token; +mod info; +pub mod node_manager; +mod server; +mod server_handle; +mod session; +mod subscriptions; +mod transport; -pub mod prelude { - //! Provides a way to use most types and functions commonly used by server implementations from a - //! single use statement. - pub use crate::core::prelude::*; - pub use crate::crypto::*; - pub use crate::server::{ - address_space::types::*, - address_space::{AccessLevel, EventNotifier, UserAccessLevel}, - builder::*, - callbacks::*, - config::*, - events::event::*, - historical::*, - server::*, - subscriptions::*, - util::*, - }; - pub use crate::types::service_types::*; - pub use crate::types::status_code::StatusCode; - pub use crate::types::*; -} +pub use builder::ServerBuilder; +pub use config::*; +pub use events::*; +pub use server::Server; +pub use server_handle::ServerHandle; +pub use session::continuation_points::ContinuationPoint; +pub use subscriptions::{ + CreateMonitoredItem, MonitoredItem, MonitoredItemHandle, SessionSubscriptions, Subscription, + SubscriptionCache, SubscriptionState, +}; +/// Contains constaints for default configuration values. +/// These are for the most part possible to override through server configuration. pub mod constants { - //! Provides constants that govern the internal workings of the server implementation. 
/// The default hello timeout period in seconds pub const DEFAULT_HELLO_TIMEOUT_SECONDS: u32 = 5; /// Default OPC UA server port for this implementation pub const DEFAULT_RUST_OPC_UA_SERVER_PORT: u16 = 4855; - /// Default maximum number of subscriptions in a session - pub const DEFAULT_MAX_SUBSCRIPTIONS: usize = 100; /// Default maximum number of monitored items per subscription pub const DEFAULT_MAX_MONITORED_ITEMS_PER_SUB: usize = 1000; /// Default, well known address for TCP discovery server @@ -106,46 +43,80 @@ pub mod constants { /// publish interval cannot be less than this. pub const SUBSCRIPTION_TIMER_RATE_MS: u64 = 100; /// Minimum publishing interval for subscriptions - pub const MIN_PUBLISHING_INTERVAL: f64 = (SUBSCRIPTION_TIMER_RATE_MS as f64) / 1000.0; + pub const MIN_PUBLISHING_INTERVAL_MS: f64 = SUBSCRIPTION_TIMER_RATE_MS as f64; /// Minimum sampling interval on monitored items - pub const MIN_SAMPLING_INTERVAL: f64 = (SUBSCRIPTION_TIMER_RATE_MS as f64) / 1000.0; + pub const MIN_SAMPLING_INTERVAL_MS: f64 = SUBSCRIPTION_TIMER_RATE_MS as f64; /// Maximum data change queue allowed by clients on monitored items pub const MAX_DATA_CHANGE_QUEUE_SIZE: usize = 10; - /// The default size of preallocated vecs of monitored items per subscription - pub const DEFAULT_MONITORED_ITEM_CAPACITY: usize = 100; - /// Interval to check for HELLO timeout in millis. This can be fairly coarse because it's not - /// something that requires huge accuracy. 
- pub const HELLO_TIMEOUT_POLL_MS: u64 = 500; /// Maximum time in MS that a session can be inactive before a timeout - pub const MAX_SESSION_TIMEOUT: f64 = 60000f64; - /// Maximum size in bytes that a request message is allowed to be - pub const MAX_REQUEST_MESSAGE_SIZE: u32 = 32768; + pub const MAX_SESSION_TIMEOUT: u64 = 60_000; /// Default keep alive count pub const DEFAULT_KEEP_ALIVE_COUNT: u32 = 10; /// Maximum keep alive count pub const MAX_KEEP_ALIVE_COUNT: u32 = 30000; /// Maximum browse continuation points - pub const MAX_BROWSE_CONTINUATION_POINTS: usize = 20; + pub const MAX_BROWSE_CONTINUATION_POINTS: usize = 5000; /// Maximum history continuation points - pub const MAX_HISTORY_CONTINUATION_POINTS: usize = 10; + pub const MAX_HISTORY_CONTINUATION_POINTS: usize = 500; /// Maximum query continuation points - pub const MAX_QUERY_CONTINUATION_POINTS: usize = 10; + pub const MAX_QUERY_CONTINUATION_POINTS: usize = 500; /// Maximum number of nodes in a TranslateBrowsePathsToNodeIdsRequest - pub const MAX_NODES_PER_TRANSLATE_BROWSE_PATHS_TO_NODE_IDS: usize = 10; - pub const MAX_NODES_PER_READ: usize = 50; - pub const MAX_NODES_PER_WRITE: usize = 10; - pub const MAX_NODES_PER_METHOD_CALL: usize = 10; - pub const MAX_NODES_PER_BROWSE: usize = 50; - pub const MAX_NODES_PER_REGISTER_NODES: usize = 10; - /// Maximum number of nodes / references per node manaument operation - pub const MAX_NODES_PER_NODE_MANAGEMENT: usize = 100; - pub const MAX_MONITORED_ITEMS_PER_CALL: usize = 10; - pub const MAX_NODES_PER_HISTORY_READ_DATA: usize = 10; - pub const MAX_NODES_PER_HISTORY_READ_EVENTS: usize = 10; - pub const MAX_NODES_PER_HISTORY_UPDATE_DATA: usize = 10; - pub const MAX_NODES_PER_HISTORY_UPDATE_EVENTS: usize = 10; -} + pub const MAX_NODES_PER_TRANSLATE_BROWSE_PATHS_TO_NODE_IDS: usize = 100; + /// Maximum number of ReadValueIds in a Read request. + pub const MAX_NODES_PER_READ: usize = 10000; + /// Maximum number of WriteValues in a Write request. 
+ pub const MAX_NODES_PER_WRITE: usize = 10000; + /// Maximum number of method calls in a Call request. + pub const MAX_NODES_PER_METHOD_CALL: usize = 100; + /// Maximum number of nodes in a Browse or BrowseNext request. + pub const MAX_NODES_PER_BROWSE: usize = 1000; + /// Maximum number of nodes per register/deregister request. + pub const MAX_NODES_PER_REGISTER_NODES: usize = 1000; + /// Maximum number of nodes per node manaument operation + pub const MAX_NODES_PER_NODE_MANAGEMENT: usize = 1000; + /// Maximum number of references per reference management operation. + pub const MAX_REFERENCES_PER_REFERENCE_MANAGEMENT: usize = 1000; + /// Maximum number of monitored items per operation. + pub const MAX_MONITORED_ITEMS_PER_CALL: usize = 1000; + /// Maximum number of nodes per history read for data. + pub const MAX_NODES_PER_HISTORY_READ_DATA: usize = 100; + /// Maixmum number of nodes per history read for events. + pub const MAX_NODES_PER_HISTORY_READ_EVENTS: usize = 100; + /// Maximum number of nodes per history update call. Not separate constants + /// for data and events because update may mix the two. + pub const MAX_NODES_PER_HISTORY_UPDATE: usize = 100; + /// Maximum number of node descriptions per query call. + pub const MAX_NODE_DESCS_PER_QUERY: usize = 100; + /// Maximum number of references to return per query data set. + pub const MAX_REFERENCES_QUERY_RETURN: usize = 100; + /// Maximum number of data sets to return per query. + pub const MAX_DATA_SETS_QUERY_RETURN: usize = 1000; + /// Maximum number of subscriptions per subscription management call, where applicable. + pub const MAX_SUBSCRIPTIONS_PER_CALL: usize = 10; -#[cfg(test)] -mod tests; + /// Maximum number of sessions active on a server. + pub const MAX_SESSIONS: usize = 20; + /// Maximum number of references per node during Browse or BrowseNext. + pub const MAX_REFERENCES_PER_BROWSE_NODE: usize = 1000; + + /// Maximum number of subscriptions per session. 
+ pub const MAX_SUBSCRIPTIONS_PER_SESSION: usize = 10; + /// Maximum number of pending publish requests per session before further requests are rejected. + pub const MAX_PENDING_PUBLISH_REQUESTS: usize = 20; + /// Maximum number of pending publish requsts per subscription. The smaller of this * number of subscriptions + /// and max_pending_publish_requests is used. + pub const MAX_PUBLISH_REQUESTS_PER_SUBSCRIPTION: usize = 4; + + /// Default publish timeout in milliseconds. + pub const DEFAULT_PUBLISH_TIMEOUT_MS: u64 = 30000; + /// Maximum number of notifications per publish, can be set lower by the client. + pub const MAX_NOTIFICATIONS_PER_PUBLISH: u64 = 0; + /// Maximum number of queued notifications. Any notifications beyond this are dropped. + pub const MAX_QUEUED_NOTIFICATIONS: usize = 20; + + /// Receive buffer size default. + pub const RECEIVE_BUFFER_SIZE: usize = std::u16::MAX as usize; + /// Send buffer size default. + pub const SEND_BUFFER_SIZE: usize = std::u16::MAX as usize; +} diff --git a/lib/src/server/node_manager/attributes.rs b/lib/src/server/node_manager/attributes.rs new file mode 100644 index 000000000..d0b6cd4ec --- /dev/null +++ b/lib/src/server/node_manager/attributes.rs @@ -0,0 +1,205 @@ +use crate::types::{ + AttributeId, DataValue, DateTime, NodeId, NumericRange, QualifiedName, ReadValueId, StatusCode, + WriteValue, +}; + +#[derive(Debug, Clone)] +/// Parsed and validated version of a raw ReadValueId from OPC-UA. +pub struct ParsedReadValueId { + pub node_id: NodeId, + pub attribute_id: AttributeId, + pub index_range: NumericRange, + pub data_encoding: QualifiedName, +} + +impl ParsedReadValueId { + /// Try to parse from a `ReadValueId`. 
+ pub fn parse(val: ReadValueId) -> Result { + let attribute_id = AttributeId::from_u32(val.attribute_id) + .map_err(|_| StatusCode::BadAttributeIdInvalid)?; + let index_range: NumericRange = val + .index_range + .as_ref() + .parse() + .map_err(|_| StatusCode::BadIndexRangeInvalid)?; + + Ok(Self { + node_id: val.node_id, + attribute_id, + index_range, + // TODO: Do something here? Do we actually care about supporting custom data encodings? + data_encoding: val.data_encoding, + }) + } + + /// Create a "null" `ParsedReadValueId`, with no node ID. + pub fn null() -> Self { + Self { + node_id: NodeId::null(), + attribute_id: AttributeId::NodeId, + index_range: NumericRange::None, + data_encoding: QualifiedName::null(), + } + } + + /// Check whether this `ParsedReadValueId` is null. + pub fn is_null(&self) -> bool { + self.node_id.is_null() + } +} + +impl Default for ParsedReadValueId { + fn default() -> Self { + Self::null() + } +} + +#[derive(Debug)] +/// Container for a single item in a `Read` service call. +pub struct ReadNode { + node: ParsedReadValueId, + pub(crate) result: DataValue, +} + +impl ReadNode { + /// Create a `ReadNode` from a `ReadValueId`. + pub(crate) fn new(node: ReadValueId) -> Self { + let mut status = StatusCode::BadNodeIdUnknown; + + let node = match ParsedReadValueId::parse(node) { + Ok(r) => r, + Err(e) => { + status = e; + ParsedReadValueId::null() + } + }; + + Self { + node, + result: DataValue { + status: Some(status), + server_timestamp: Some(DateTime::now()), + ..Default::default() + }, + } + } + + /// Get the current result status code. + pub fn status(&self) -> StatusCode { + self.result.status() + } + + /// Get the node/attribute pair to read. + pub fn node(&self) -> &ParsedReadValueId { + &self.node + } + + /// Set the result of this read operation. + pub fn set_result(&mut self, result: DataValue) { + self.result = result; + } + + /// Set the result of this read operation to an error with no value or + /// timestamp. 
Use this not if the value is an error, but if the read + /// failed. + pub fn set_error(&mut self, status: StatusCode) { + self.result = DataValue { + status: Some(status), + server_timestamp: Some(DateTime::now()), + ..Default::default() + } + } + + pub(crate) fn take_result(self) -> DataValue { + self.result + } +} + +#[derive(Debug, Clone)] +/// Parsed and validated version of the raw OPC-UA `WriteValue`. +pub struct ParsedWriteValue { + pub node_id: NodeId, + pub attribute_id: AttributeId, + pub index_range: NumericRange, + pub value: DataValue, +} + +impl ParsedWriteValue { + /// Try to parse from a `WriteValue`. + pub fn parse(val: WriteValue) -> Result { + let attribute_id = AttributeId::from_u32(val.attribute_id) + .map_err(|_| StatusCode::BadAttributeIdInvalid)?; + let index_range: NumericRange = val + .index_range + .as_ref() + .parse() + .map_err(|_| StatusCode::BadIndexRangeInvalid)?; + + Ok(Self { + node_id: val.node_id, + attribute_id, + index_range, + value: val.value, + }) + } + + /// Create a "null" `ParsedWriteValue`. + pub fn null() -> Self { + Self { + node_id: NodeId::null(), + attribute_id: AttributeId::NodeId, + index_range: NumericRange::None, + value: DataValue::null(), + } + } + + /// Check if this `ParsedWriteValue` is null. + pub fn is_null(&self) -> bool { + self.node_id.is_null() + } +} + +impl Default for ParsedWriteValue { + fn default() -> Self { + Self::null() + } +} + +/// Container for a single item in a `Write` service call. +#[derive(Debug)] +pub struct WriteNode { + value: ParsedWriteValue, + status: StatusCode, +} + +impl WriteNode { + /// Create a `WriteNode` from a raw OPC-UA `WriteValue`. + pub(crate) fn new(value: WriteValue) -> Self { + let mut status = StatusCode::BadNodeIdUnknown; + + let value = match ParsedWriteValue::parse(value) { + Ok(r) => r, + Err(e) => { + status = e; + ParsedWriteValue::null() + } + }; + + Self { value, status } + } + + /// Get the current status. 
+ pub fn status(&self) -> StatusCode { + self.status + } + + /// Set the status code result of this operation. + pub fn set_status(&mut self, status: StatusCode) { + self.status = status; + } + + /// Get the value to write. + pub fn value(&self) -> &ParsedWriteValue { + &self.value + } +} diff --git a/lib/src/server/node_manager/build.rs b/lib/src/server/node_manager/build.rs new file mode 100644 index 000000000..551131f09 --- /dev/null +++ b/lib/src/server/node_manager/build.rs @@ -0,0 +1,71 @@ +use std::{collections::HashMap, sync::Arc}; + +use crate::server::address_space::AddressSpace; + +use super::{DynNodeManager, NodeManager, ServerContext}; + +pub trait NodeManagerBuilder { + fn build(self: Box, context: ServerContext) -> Arc; +} + +impl NodeManagerBuilder for T +where + T: FnOnce(ServerContext) -> R, +{ + fn build(self: Box, context: ServerContext) -> Arc { + Arc::new(self(context)) + } +} + +/// Utility for handling assignment of namespaces on server startup. +#[derive(Debug, Default)] +pub struct NamespaceMap { + known_namespaces: HashMap, +} + +impl NamespaceMap { + pub fn new() -> Self { + let mut known_namespaces = HashMap::new(); + known_namespaces.insert("http://opcfoundation.org/UA/".to_owned(), 0u16); + + Self { known_namespaces } + } + + pub fn add_namespace(&mut self, namespace: &str) -> u16 { + if let Some(ns) = self.known_namespaces.get(namespace) { + return *ns; + } + let max = self + .known_namespaces + .iter() + .map(|kv| *kv.1) + .max() + .unwrap_or_default(); + self.known_namespaces.insert(namespace.to_owned(), max + 1); + + max + 1 + } + + pub fn known_namespaces(&self) -> &HashMap { + &self.known_namespaces + } + + pub fn get_index(&self, ns: &str) -> Option { + self.known_namespaces.get(ns).copied() + } +} + +pub fn add_namespaces( + context: &ServerContext, + address_space: &mut AddressSpace, + namespaces: &[&str], +) -> Vec { + let mut type_tree = context.type_tree.write(); + let mut res = Vec::new(); + for ns in namespaces { + let 
idx = type_tree.namespaces_mut().add_namespace(ns); + address_space.add_namespace(ns, idx); + res.push(idx); + } + res +} diff --git a/lib/src/server/node_manager/context.rs b/lib/src/server/node_manager/context.rs new file mode 100644 index 000000000..cb6fe8202 --- /dev/null +++ b/lib/src/server/node_manager/context.rs @@ -0,0 +1,63 @@ +use std::sync::Arc; + +use crate::{ + server::{ + authenticator::{AuthManager, UserToken}, + info::ServerInfo, + session::instance::Session, + SubscriptionCache, + }, + sync::RwLock, + types::{BrowseDescriptionResultMask, NodeId}, +}; + +use super::{ + view::{ExternalReferenceRequest, NodeMetadata}, + NodeManagers, TypeTree, +}; + +#[derive(Clone)] +/// Context object passed during writes, contains useful context the node +/// managers can use to execute service calls. +pub struct RequestContext { + /// The full session object for the session responsible for this service call. + pub session: Arc>, + /// The session ID for the session responsible for this service call. + pub session_id: u32, + /// The global `AuthManager` object. + pub authenticator: Arc, + /// The current user token. + pub token: UserToken, + /// Index of the current node manager. + pub current_node_manager_index: usize, + /// Global type tree object. + pub type_tree: Arc>, + /// Subscription cache, containing all subscriptions on the server. + pub subscriptions: Arc, + /// Server info object, containing configuration and other shared server + /// state. + pub info: Arc, +} + +/// Resolve a list of references. 
+pub(crate) async fn resolve_external_references( + context: &RequestContext, + node_managers: &NodeManagers, + references: &[(&NodeId, BrowseDescriptionResultMask)], +) -> Vec> { + let mut res: Vec<_> = references + .into_iter() + .map(|(n, mask)| ExternalReferenceRequest::new(n, *mask)) + .collect(); + + for nm in node_managers.iter() { + let mut items: Vec<_> = res + .iter_mut() + .filter(|r| nm.owns_node(r.node_id())) + .collect(); + + nm.resolve_external_references(context, &mut items).await; + } + + res.into_iter().map(|r| r.into_inner()).collect() +} diff --git a/lib/src/server/node_manager/history.rs b/lib/src/server/node_manager/history.rs new file mode 100644 index 000000000..5121d5f2e --- /dev/null +++ b/lib/src/server/node_manager/history.rs @@ -0,0 +1,282 @@ +use crate::{ + crypto::random, + server::session::{continuation_points::ContinuationPoint, instance::Session}, + types::{ + BinaryEncoder, ByteString, DecodingOptions, DeleteAtTimeDetails, DeleteEventDetails, + DeleteRawModifiedDetails, ExtensionObject, HistoryData, HistoryEvent, HistoryModifiedData, + HistoryReadResult, HistoryReadValueId, HistoryUpdateResult, NodeId, NumericRange, ObjectId, + QualifiedName, ReadAnnotationDataDetails, ReadAtTimeDetails, ReadEventDetails, + ReadProcessedDetails, ReadRawModifiedDetails, StatusCode, UpdateDataDetails, + UpdateEventDetails, UpdateStructureDataDetails, + }, +}; + +/// Container for a single node in a history read request. 
+pub struct HistoryNode { + node_id: NodeId, + index_range: NumericRange, + data_encoding: QualifiedName, + input_continuation_point: Option, + next_continuation_point: Option, + result: Option, + status: StatusCode, +} + +pub(crate) enum HistoryReadDetails { + RawModified(ReadRawModifiedDetails), + AtTime(ReadAtTimeDetails), + Processed(ReadProcessedDetails), + Events(ReadEventDetails), + Annotations(ReadAnnotationDataDetails), +} + +impl HistoryReadDetails { + pub fn from_extension_object( + obj: ExtensionObject, + decoding_options: &DecodingOptions, + ) -> Result { + let object_id = obj + .object_id() + .map_err(|_| StatusCode::BadHistoryOperationInvalid)?; + match object_id { + ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary => { + Ok(Self::RawModified(obj.decode_inner(decoding_options)?)) + } + ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary => { + Ok(Self::AtTime(obj.decode_inner(decoding_options)?)) + } + ObjectId::ReadProcessedDetails_Encoding_DefaultBinary => { + Ok(Self::Processed(obj.decode_inner(decoding_options)?)) + } + ObjectId::ReadEventDetails_Encoding_DefaultBinary => { + Ok(Self::Events(obj.decode_inner(decoding_options)?)) + } + ObjectId::ReadAnnotationDataDetails_Encoding_DefaultBinary => { + Ok(Self::Annotations(obj.decode_inner(decoding_options)?)) + } + _ => Err(StatusCode::BadHistoryOperationInvalid), + } + } +} + +/// Details object for history updates. +#[derive(Debug, Clone)] +pub enum HistoryUpdateDetails { + UpdateData(UpdateDataDetails), + UpdateStructureData(UpdateStructureDataDetails), + UpdateEvent(UpdateEventDetails), + DeleteRawModified(DeleteRawModifiedDetails), + DeleteAtTime(DeleteAtTimeDetails), + DeleteEvent(DeleteEventDetails), +} + +impl HistoryUpdateDetails { + /// Try to create a `HistoryUpdateDetails` object from an extension object. 
+ pub fn from_extension_object( + obj: ExtensionObject, + decoding_options: &DecodingOptions, + ) -> Result { + let object_id = obj + .object_id() + .map_err(|_| StatusCode::BadHistoryOperationInvalid)?; + match object_id { + ObjectId::UpdateDataDetails_Encoding_DefaultBinary => { + Ok(Self::UpdateData(obj.decode_inner(decoding_options)?)) + } + ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary => Ok( + Self::UpdateStructureData(obj.decode_inner(decoding_options)?), + ), + ObjectId::UpdateEventDetails_Encoding_DefaultBinary => { + Ok(Self::UpdateEvent(obj.decode_inner(decoding_options)?)) + } + ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary => { + Ok(Self::DeleteRawModified(obj.decode_inner(decoding_options)?)) + } + ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary => { + Ok(Self::DeleteAtTime(obj.decode_inner(decoding_options)?)) + } + ObjectId::DeleteEventDetails_Encoding_DefaultBinary => { + Ok(Self::DeleteEvent(obj.decode_inner(decoding_options)?)) + } + _ => Err(StatusCode::BadHistoryOperationInvalid), + } + } + + /// Get the node ID of the details object, independent of type. + pub fn node_id(&self) -> &NodeId { + match self { + HistoryUpdateDetails::UpdateData(d) => &d.node_id, + HistoryUpdateDetails::UpdateStructureData(d) => &d.node_id, + HistoryUpdateDetails::UpdateEvent(d) => &d.node_id, + HistoryUpdateDetails::DeleteRawModified(d) => &d.node_id, + HistoryUpdateDetails::DeleteAtTime(d) => &d.node_id, + HistoryUpdateDetails::DeleteEvent(d) => &d.node_id, + } + } +} + +/// Trait for values storable as history data. +pub trait HistoryResult: BinaryEncoder + Sized { + /// The object ID of the object encoding. + const OBJECT_ID: ObjectId; + + /// Return an extension object containing the encoded data for the current object. 
+ fn as_extension_object(&self) -> ExtensionObject { + ExtensionObject::from_encodable(Self::OBJECT_ID, self) + } +} + +impl HistoryResult for HistoryData { + const OBJECT_ID: ObjectId = ObjectId::HistoryData_Encoding_DefaultBinary; +} +impl HistoryResult for HistoryModifiedData { + const OBJECT_ID: ObjectId = ObjectId::HistoryModifiedData_Encoding_DefaultBinary; +} +impl HistoryResult for HistoryEvent { + const OBJECT_ID: ObjectId = ObjectId::HistoryEvent_Encoding_DefaultBinary; +} +// impl HistoryResult for HistoryModifiedEvent {} + +impl HistoryNode { + pub(crate) fn new( + node: HistoryReadValueId, + is_events: bool, + cp: Option, + ) -> Self { + let mut status = StatusCode::BadNodeIdUnknown; + let index_range = match node.index_range.as_ref().parse::() { + Err(_) => { + status = StatusCode::BadIndexRangeInvalid; + NumericRange::None + } + Ok(r) => r, + }; + + if !matches!(index_range, NumericRange::None) && is_events { + status = StatusCode::BadIndexRangeDataMismatch; + } + + Self { + node_id: node.node_id, + index_range, + data_encoding: node.data_encoding, + input_continuation_point: cp, + next_continuation_point: None, + result: None, + status, + } + } + + /// Get the node ID to read history from. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + /// Get the index range to read. + pub fn index_range(&self) -> &NumericRange { + &self.index_range + } + + /// Get the specified data encoding to read. + pub fn data_encoding(&self) -> &QualifiedName { + &self.data_encoding + } + + /// Get the current continuation point. + pub fn continuation_point(&self) -> Option<&ContinuationPoint> { + self.input_continuation_point.as_ref() + } + + /// Get the next continuation point. + pub fn next_continuation_point(&self) -> Option<&ContinuationPoint> { + self.next_continuation_point.as_ref() + } + + /// Set the next continuation point. 
+ pub fn set_next_continuation_point(&mut self, continuation_point: Option) { + self.next_continuation_point = continuation_point; + } + + /// Set the result to some history data object. + pub fn set_result(&mut self, result: &T) { + self.result = Some(result.as_extension_object()); + } + + /// Set the result status. + pub fn set_status(&mut self, status: StatusCode) { + self.status = status; + } + + /// Get the current result status. + pub fn status(&self) -> StatusCode { + self.status + } + + pub(crate) fn into_result(mut self, session: &mut Session) -> HistoryReadResult { + let cp = match self.next_continuation_point { + Some(p) => { + let id = random::byte_string(6); + if session.add_history_continuation_point(&id, p).is_err() { + self.status = StatusCode::BadNoContinuationPoints; + ByteString::null() + } else { + id + } + } + None => ByteString::null(), + }; + + HistoryReadResult { + status_code: self.status, + continuation_point: cp, + history_data: self.result.unwrap_or_else(|| ExtensionObject::null()), + } + } +} + +/// History update details for one node. +pub struct HistoryUpdateNode { + details: HistoryUpdateDetails, + status: StatusCode, + operation_results: Option>, +} + +impl HistoryUpdateNode { + pub(crate) fn new(details: HistoryUpdateDetails) -> Self { + Self { + details, + status: StatusCode::BadNodeIdUnknown, + operation_results: None, + } + } + + /// Set the result status of this history operation. + pub fn set_status(&mut self, status: StatusCode) { + self.status = status; + } + + /// Get the current status. + pub fn status(&self) -> StatusCode { + self.status + } + + /// Set the operation results. If present the length must match + /// the length of the entries in the history update details. 
+ pub fn set_operation_results(&mut self, operation_results: Option>) { + self.operation_results = operation_results; + } + + pub(crate) fn into_result(self) -> HistoryUpdateResult { + HistoryUpdateResult { + diagnostic_infos: None, + status_code: self.status, + operation_results: self.operation_results, + } + } + + /// Get a reference to the history update details describing the history update + /// to execute. + pub fn details(&self) -> &HistoryUpdateDetails { + &self.details + } +} diff --git a/lib/src/server/node_manager/memory/core.rs b/lib/src/server/node_manager/memory/core.rs new file mode 100644 index 000000000..5a542a83f --- /dev/null +++ b/lib/src/server/node_manager/memory/core.rs @@ -0,0 +1,377 @@ +use std::time::Duration; + +use async_trait::async_trait; +use hashbrown::HashMap; + +use crate::{ + server::{ + address_space::{read_node_value, AddressSpace}, + node_manager::{ + NodeManagersRef, ParsedReadValueId, RequestContext, ServerContext, SyncSampler, + }, + subscriptions::CreateMonitoredItem, + ServerCapabilities, + }, + sync::RwLock, + types::{ + AccessRestrictionType, DataValue, IdType, Identifier, NumericRange, ObjectId, + ReferenceTypeId, StatusCode, TimestampsToReturn, VariableId, Variant, + }, +}; + +use super::{ + InMemoryNodeManager, InMemoryNodeManagerImpl, InMemoryNodeManagerImplBuilder, NamespaceMetadata, +}; + +/// Node manager impl for the core namespace. +pub struct CoreNodeManagerImpl { + sampler: SyncSampler, + node_managers: NodeManagersRef, +} + +/// Node manager for the core namespace. 
+pub type CoreNodeManager = InMemoryNodeManager; + +pub struct CoreNodeManagerBuilder; + +impl InMemoryNodeManagerImplBuilder for CoreNodeManagerBuilder { + type Impl = CoreNodeManagerImpl; + + fn build(self, context: ServerContext, address_space: &mut AddressSpace) -> Self::Impl { + { + let mut type_tree = context.type_tree.write(); + address_space.add_namespace( + "http://opcfoundation.org/UA/", + type_tree + .namespaces_mut() + .add_namespace("http://opcfoundation.org/UA/"), + ); + } + + crate::server::address_space::populate_address_space(address_space); + CoreNodeManagerImpl::new(context.node_managers.clone()) + } +} + +/* +The core node manager serves as an example for how you can create a simple +node manager based on the in-memory node manager. + +In this case the data is largely static, so all we need to really +implement is Read, leaving the responsibility for notifying any subscriptions +of changes to these to the one doing the modifying. +*/ + +#[async_trait] +impl InMemoryNodeManagerImpl for CoreNodeManagerImpl { + async fn init(&self, address_space: &mut AddressSpace, context: ServerContext) { + self.add_aggregates(address_space, &context.info.capabilities); + let interval = context + .info + .config + .limits + .subscriptions + .min_sampling_interval_ms + .floor() as u64; + let sampler_interval = if interval > 0 { interval } else { 100 }; + self.sampler.run( + Duration::from_millis(sampler_interval), + context.subscriptions.clone(), + ); + } + + fn namespaces(&self) -> Vec { + vec![NamespaceMetadata { + is_namespace_subset: Some(false), + // TODO: Should be possible to fill this + namespace_publication_date: None, + namespace_version: None, + namespace_uri: "http://opcfoundation.org/UA/".to_owned(), + static_node_id_types: Some(vec![IdType::Numeric]), + namespace_index: 0, + ..Default::default() + }] + } + + fn name(&self) -> &str { + "core" + } + + async fn read_values( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes: 
&[&ParsedReadValueId], + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> Vec { + let address_space = address_space.read(); + + nodes + .iter() + .map(|n| { + self.read_node_value(context, &address_space, n, max_age, timestamps_to_return) + }) + .collect() + } + + async fn create_value_monitored_items( + &self, + context: &RequestContext, + address_space: &RwLock, + items: &mut [&mut &mut CreateMonitoredItem], + ) { + let address_space = address_space.read(); + for node in items { + let value = self.read_node_value( + context, + &address_space, + node.item_to_monitor(), + 0.0, + node.timestamps_to_return(), + ); + if value.status() != StatusCode::BadAttributeIdInvalid { + node.set_initial_value(value); + } + node.set_status(StatusCode::Good); + } + } +} + +impl CoreNodeManagerImpl { + pub(super) fn new(node_managers: NodeManagersRef) -> Self { + Self { + sampler: SyncSampler::new(), + node_managers, + } + } + + fn read_node_value( + &self, + context: &RequestContext, + address_space: &AddressSpace, + node_to_read: &ParsedReadValueId, + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> DataValue { + let mut result_value = DataValue::null(); + // Check that the read is permitted. + let node = match address_space.validate_node_read(context, node_to_read) { + Ok(n) => n, + Err(e) => { + result_value.status = Some(e); + return result_value; + } + }; + // Try to read a special value, that is obtained from somewhere else. + // A custom node manager might read this from some device, or get them + // in some other way. + + // In this case, the values are largely read from configuration. + if let Some(v) = self.read_server_value(context, node_to_read) { + v + } else { + // If it can't be found, read it from the node hierarchy. 
+ read_node_value(node, context, node_to_read, max_age, timestamps_to_return) + } + } + + fn read_server_value( + &self, + context: &RequestContext, + node: &ParsedReadValueId, + ) -> Option { + if node.node_id.namespace != 0 { + return None; + } + let Identifier::Numeric(identifier) = node.node_id.identifier else { + return None; + }; + let Ok(var_id) = VariableId::try_from(identifier) else { + return None; + }; + + let limits = &context.info.config.limits; + let hist_cap = &context.info.capabilities.history; + + let v: Variant = match var_id { + VariableId::Server_ServerCapabilities_MaxArrayLength => { + (limits.max_array_length as u32).into() + } + VariableId::Server_ServerCapabilities_MaxBrowseContinuationPoints => { + (limits.max_browse_continuation_points as u32).into() + } + VariableId::Server_ServerCapabilities_MaxByteStringLength => { + (limits.max_byte_string_length as u32).into() + } + VariableId::Server_ServerCapabilities_MaxHistoryContinuationPoints => { + (limits.max_history_continuation_points as u32).into() + } + VariableId::Server_ServerCapabilities_MaxQueryContinuationPoints => { + (limits.max_query_continuation_points as u32).into() + } + VariableId::Server_ServerCapabilities_MaxStringLength => { + (limits.max_string_length as u32).into() + } + VariableId::Server_ServerCapabilities_MinSupportedSampleRate => { + (limits.subscriptions.min_sampling_interval_ms as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxMonitoredItemsPerCall => { + (limits.operational.max_monitored_items_per_call as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerBrowse => { + (limits.operational.max_nodes_per_browse as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadData => { + (limits.operational.max_nodes_per_history_read_data as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadEvents => { + 
(limits.operational.max_nodes_per_history_read_events as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateData => { + (limits.operational.max_nodes_per_history_update as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateEvents => { + (limits.operational.max_nodes_per_history_update as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerMethodCall => { + (limits.operational.max_nodes_per_method_call as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerNodeManagement => { + (limits.operational.max_nodes_per_node_management as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerRead => { + (limits.operational.max_nodes_per_read as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerRegisterNodes => { + (limits.operational.max_nodes_per_register_nodes as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerTranslateBrowsePathsToNodeIds => { + (limits.operational.max_nodes_per_translate_browse_paths_to_node_ids as u32).into() + } + VariableId::Server_ServerCapabilities_OperationLimits_MaxNodesPerWrite => { + (limits.operational.max_nodes_per_write as u32).into() + } + VariableId::Server_ServerCapabilities_ServerProfileArray => { + context.info.capabilities.profiles.clone().into() + } + + // History capabilities + VariableId::HistoryServerCapabilities_AccessHistoryDataCapability => { + hist_cap.access_history_data.into() + } + VariableId::HistoryServerCapabilities_AccessHistoryEventsCapability => { + hist_cap.access_history_events.into() + } + VariableId::HistoryServerCapabilities_DeleteAtTimeCapability => { + hist_cap.delete_at_time.into() + } + VariableId::HistoryServerCapabilities_DeleteEventCapability => { + hist_cap.delete_event.into() + } + VariableId::HistoryServerCapabilities_DeleteRawCapability => { + 
hist_cap.delete_raw.into() + } + VariableId::HistoryServerCapabilities_InsertAnnotationCapability => { + hist_cap.insert_annotation.into() + } + VariableId::HistoryServerCapabilities_InsertDataCapability => { + hist_cap.insert_data.into() + } + VariableId::HistoryServerCapabilities_InsertEventCapability => { + hist_cap.insert_event.into() + } + VariableId::HistoryServerCapabilities_MaxReturnDataValues => { + hist_cap.max_return_data_values.into() + } + VariableId::HistoryServerCapabilities_MaxReturnEventValues => { + hist_cap.max_return_event_values.into() + } + VariableId::HistoryServerCapabilities_ReplaceDataCapability => { + hist_cap.replace_data.into() + } + VariableId::HistoryServerCapabilities_ReplaceEventCapability => { + hist_cap.replace_event.into() + } + VariableId::HistoryServerCapabilities_ServerTimestampSupported => { + hist_cap.server_timestamp_supported.into() + } + VariableId::HistoryServerCapabilities_UpdateDataCapability => { + hist_cap.update_data.into() + } + VariableId::HistoryServerCapabilities_UpdateEventCapability => { + hist_cap.update_event.into() + } + + // Misc server status + VariableId::Server_ServiceLevel => { + context.info.service_level.load(std::sync::atomic::Ordering::Relaxed).into() + } + + // Namespace metadata + VariableId::OPCUANamespaceMetadata_IsNamespaceSubset => { + false.into() + } + VariableId::OPCUANamespaceMetadata_DefaultAccessRestrictions => { + AccessRestrictionType::None.bits().into() + } + VariableId::OPCUANamespaceMetadata_NamespaceUri => { + "http://opcfoundation.org/UA/".to_owned().into() + } + VariableId::OPCUANamespaceMetadata_StaticNodeIdTypes => { + vec![IdType::Numeric as u8].into() + } + + VariableId::Server_NamespaceArray => { + // This actually calls into other node managers to obtain the value, in fact + // it calls into _this_ node manager as well. + // Be careful to avoid holding exclusive locks in a way that causes a deadlock + // when doing this. 
Here we hold a read lock on the address space, + // but in this case it doesn't matter. + let nss: HashMap<_, _> = self.node_managers.iter().flat_map(|n| n.namespaces_for_user(context)).map(|ns| (ns.namespace_index, ns.namespace_uri)).collect(); + // Make sure that holes are filled with empty strings, so that the + // namespace array actually has correct indices. + let Some(&max) = nss.keys().max() else { + return None; + }; + let namespaces: Vec<_> = (0..(max + 1)).map(|idx| nss.get(&idx).cloned().unwrap_or_default()).collect(); + namespaces.into() + } + + _ => return None, + }; + + let v = if !matches!(node.index_range, NumericRange::None) { + match v.range_of(node.index_range.clone()) { + Ok(v) => v, + Err(e) => { + return Some(DataValue { + value: None, + status: Some(e), + ..Default::default() + }) + } + } + } else { + v + }; + + Some(DataValue { + value: Some(v), + status: Some(StatusCode::Good), + source_timestamp: Some(**context.info.start_time.load()), + server_timestamp: Some(**context.info.start_time.load()), + ..Default::default() + }) + } + + fn add_aggregates(&self, address_space: &mut AddressSpace, capabilities: &ServerCapabilities) { + for aggregate in &capabilities.history.aggregates { + address_space.insert_reference( + &ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), + &aggregate, + ReferenceTypeId::Organizes, + ) + } + } +} diff --git a/lib/src/server/node_manager/memory/diagnostics.rs b/lib/src/server/node_manager/memory/diagnostics.rs new file mode 100644 index 000000000..d26abff7b --- /dev/null +++ b/lib/src/server/node_manager/memory/diagnostics.rs @@ -0,0 +1,659 @@ +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, + time::Duration, +}; + +use async_trait::async_trait; + +use crate::{ + server::{ + address_space::AccessLevel, + node_manager::{ + as_opaque_node_id, + build::NodeManagerBuilder, + from_opaque_node_id, + view::{AddReferenceResult, NodeMetadata}, + BrowseNode, DynNodeManager, NodeManager, 
NodeManagersRef, ReadNode, RequestContext, + ServerContext, SyncSampler, TypeTree, + }, + }, + types::{ + AccessLevelExType, AccessRestrictionType, AttributeId, BrowseDirection, DataTypeId, + DataValue, DateTime, ExpandedNodeId, ExtensionObject, IdType, LocalizedText, NodeClass, + NodeId, NumericRange, ObjectId, ObjectTypeId, QualifiedName, ReferenceDescription, + ReferenceTypeId, RolePermissionType, StatusCode, TimestampsToReturn, VariableTypeId, + Variant, + }, +}; + +/// Node manager handling nodes in the server hierarchy that are not part of the +/// core namespace, and that are somehow dynamic. This includes the node for each namespace, +/// session diagnostics, etc. +pub struct DiagnosticsNodeManager { + sampler: SyncSampler, + node_managers: NodeManagersRef, + namespace_index: u16, +} + +/* +The diagnostics node manager is a simple example of a node manager that +obtains its structure from somewhere else. Specifically in this case, +the structure is virtual, and obtained from the current server state. + +In order to allow this the node manager cannot be based on the in-memory node manager, +which allows for static hierarchies only. + +We want to produce consistent node IDs without a cache, so we use opaque node IDs to +make identifiers that describe where to find the data. That way we can handle Read's +of nodes without explicitly storing each node ID. 
+*/ + +#[derive(Default, Clone, Debug)] +pub struct NamespaceMetadata { + pub default_access_restrictions: AccessRestrictionType, + pub default_role_permissions: Option>, + pub default_user_role_permissions: Option>, + pub is_namespace_subset: Option, + pub namespace_publication_date: Option, + pub namespace_uri: String, + pub namespace_version: Option, + pub static_node_id_types: Option>, + pub static_numeric_node_id_range: Option>, + pub static_string_node_id_pattern: Option, + pub namespace_index: u16, +} + +#[derive(Default)] +struct BrowseContinuationPoint { + nodes: VecDeque, +} + +#[derive(Serialize, Deserialize)] +struct NamespaceNode { + namespace: String, + property: Option, +} + +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +enum DiagnosticsNode { + Namespace(NamespaceNode), +} + +pub struct DiagnosticsNodeManagerBuilder; + +impl NodeManagerBuilder for DiagnosticsNodeManagerBuilder { + fn build(self: Box, context: ServerContext) -> Arc { + Arc::new(DiagnosticsNodeManager::new(context)) + } +} + +impl DiagnosticsNodeManager { + pub(crate) fn new(context: ServerContext) -> Self { + let namespace_index = { + let mut type_tree = context.type_tree.write(); + type_tree + .namespaces_mut() + .add_namespace(context.info.application_uri.as_ref()) + }; + Self { + sampler: SyncSampler::new(), + node_managers: context.node_managers.clone(), + namespace_index, + } + } + + fn namespaces(&self, context: &RequestContext) -> BTreeMap { + self.node_managers + .iter() + .flat_map(move |nm| nm.namespaces_for_user(context)) + .map(|ns| (ns.namespace_uri.clone(), ns)) + .collect() + } + + fn namespace_node_metadata(&self, ns: &NamespaceMetadata) -> NodeMetadata { + NodeMetadata { + node_id: ExpandedNodeId::new( + as_opaque_node_id( + &DiagnosticsNode::Namespace(NamespaceNode { + namespace: ns.namespace_uri.clone(), + property: None, + }), + self.namespace_index, + ) + .unwrap(), + ), + type_definition: ExpandedNodeId::new(ObjectTypeId::NamespaceMetadataType), + 
browse_name: QualifiedName::new(ns.namespace_index, ns.namespace_uri.clone()), + display_name: LocalizedText::new("", &ns.namespace_uri), + node_class: NodeClass::Object, + } + } + + fn browse_namespaces( + &self, + node_to_browse: &mut BrowseNode, + type_tree: &TypeTree, + namespaces: &BTreeMap, + ) { + // Only hierarchical references in this case, so we can check for that first. + if !matches!( + node_to_browse.browse_direction(), + BrowseDirection::Forward | BrowseDirection::Both + ) { + return; + } + + if !node_to_browse.allows_reference_type(&ReferenceTypeId::HasComponent.into(), type_tree) { + return; + } + + let mut cp = BrowseContinuationPoint::default(); + + for namespace in namespaces.values() { + // Handled by the core node manager + if namespace.namespace_index == 0 { + continue; + } + let metadata = self.namespace_node_metadata(namespace); + let ref_desc = ReferenceDescription { + reference_type_id: ReferenceTypeId::HasComponent.into(), + is_forward: true, + node_id: metadata.node_id, + browse_name: metadata.browse_name, + display_name: metadata.display_name, + node_class: metadata.node_class, + type_definition: metadata.type_definition, + }; + + if let AddReferenceResult::Full(c) = node_to_browse.add(type_tree, ref_desc) { + cp.nodes.push_back(c); + } + } + + if !cp.nodes.is_empty() { + node_to_browse.set_next_continuation_point(Box::new(cp)); + } + } + + fn browse_namespace_metadata_node( + &self, + node_to_browse: &mut BrowseNode, + type_tree: &TypeTree, + meta: &NamespaceMetadata, + ) { + let mut cp = BrowseContinuationPoint::default(); + + if matches!( + node_to_browse.browse_direction(), + BrowseDirection::Forward | BrowseDirection::Both + ) { + if node_to_browse.allows_reference_type(&ReferenceTypeId::HasProperty.into(), type_tree) + && node_to_browse.allows_node_class(NodeClass::Variable) + { + for prop in [ + "DefaultAccessRestrictions", + "DefaultRolePermissions", + "DefaultUserRolePermissions", + "IsNamespaceSubset", + 
"NamespacePublicationDate", + "NamespaceUri", + "NamespaceVersion", + "StaticNodeIdTypes", + "StaticNumericNodeIdRange", + "StaticStringNodeIdPattern", + ] { + let ref_desc = ReferenceDescription { + reference_type_id: ReferenceTypeId::HasProperty.into(), + is_forward: true, + node_id: ExpandedNodeId::new( + as_opaque_node_id( + &DiagnosticsNode::Namespace(NamespaceNode { + namespace: meta.namespace_uri.clone(), + property: Some(prop.to_owned()), + }), + self.namespace_index, + ) + .unwrap(), + ), + type_definition: ExpandedNodeId::new(VariableTypeId::PropertyType), + browse_name: QualifiedName::new(0, prop), + display_name: LocalizedText::new("", prop), + node_class: NodeClass::Variable, + }; + if let AddReferenceResult::Full(c) = node_to_browse.add(type_tree, ref_desc) { + cp.nodes.push_back(c); + } + } + } + + if node_to_browse + .allows_reference_type(&ReferenceTypeId::HasTypeDefinition.into(), type_tree) + { + let ref_desc = ReferenceDescription { + reference_type_id: ReferenceTypeId::HasTypeDefinition.into(), + is_forward: true, + node_id: ObjectTypeId::NamespaceMetadataType.into(), + browse_name: QualifiedName::new(0, "NamespaceMetadataType"), + display_name: LocalizedText::new("", "NamespaceMetadataType"), + node_class: NodeClass::ObjectType, + type_definition: ExpandedNodeId::null(), + }; + if let AddReferenceResult::Full(c) = node_to_browse.add(type_tree, ref_desc) { + cp.nodes.push_back(c); + } + } + } + + if matches!( + node_to_browse.browse_direction(), + BrowseDirection::Inverse | BrowseDirection::Both + ) { + let ref_desc = ReferenceDescription { + reference_type_id: ReferenceTypeId::HasComponent.into(), + is_forward: false, + node_id: ObjectId::Server_Namespaces.into(), + browse_name: QualifiedName::new(0, "Namespaces"), + display_name: LocalizedText::new("", "Namespaces"), + node_class: NodeClass::Object, + type_definition: ObjectTypeId::NamespacesType.into(), + }; + if let AddReferenceResult::Full(c) = node_to_browse.add(type_tree, ref_desc) { + 
cp.nodes.push_back(c); + } + } + + if !cp.nodes.is_empty() { + node_to_browse.set_next_continuation_point(Box::new(cp)); + } + } + + fn browse_namespace_property_node( + &self, + node_to_browse: &mut BrowseNode, + type_tree: &TypeTree, + meta: &NamespaceMetadata, + ) { + let mut cp = BrowseContinuationPoint::default(); + + if matches!( + node_to_browse.browse_direction(), + BrowseDirection::Forward | BrowseDirection::Both + ) { + let ref_desc = ReferenceDescription { + reference_type_id: ReferenceTypeId::HasTypeDefinition.into(), + is_forward: true, + node_id: VariableTypeId::PropertyType.into(), + browse_name: QualifiedName::new(0, "PropertyType"), + display_name: LocalizedText::new("", "PropertyType"), + node_class: NodeClass::VariableType, + type_definition: ExpandedNodeId::null(), + }; + if let AddReferenceResult::Full(c) = node_to_browse.add(type_tree, ref_desc) { + cp.nodes.push_back(c); + } + } + + if matches!( + node_to_browse.browse_direction(), + BrowseDirection::Inverse | BrowseDirection::Both + ) { + let metadata = self.namespace_node_metadata(meta); + let ref_desc = ReferenceDescription { + reference_type_id: ReferenceTypeId::HasComponent.into(), + is_forward: false, + node_id: metadata.node_id, + browse_name: metadata.browse_name, + display_name: metadata.display_name, + node_class: metadata.node_class, + type_definition: metadata.type_definition, + }; + + if let AddReferenceResult::Full(c) = node_to_browse.add(type_tree, ref_desc) { + cp.nodes.push_back(c); + } + } + + if !cp.nodes.is_empty() { + node_to_browse.set_next_continuation_point(Box::new(cp)); + } + } + + fn browse_namespace_node( + &self, + node_to_browse: &mut BrowseNode, + type_tree: &TypeTree, + namespaces: &BTreeMap, + ns_node: &NamespaceNode, + ) { + let Some(namespace) = namespaces.get(&ns_node.namespace) else { + node_to_browse.set_status(StatusCode::BadNodeIdUnknown); + return; + }; + + if ns_node.property.is_some() { + self.browse_namespace_property_node(node_to_browse, type_tree, 
namespace); + } else { + self.browse_namespace_metadata_node(node_to_browse, type_tree, namespace); + } + } + + fn read_namespace_metadata_node( + &self, + start_time: DateTime, + node_to_read: &mut ReadNode, + namespace: &NamespaceMetadata, + ) { + let v: Variant = match node_to_read.node().attribute_id { + AttributeId::NodeId => as_opaque_node_id( + &DiagnosticsNode::Namespace(NamespaceNode { + namespace: namespace.namespace_uri.clone(), + property: None, + }), + self.namespace_index, + ) + .unwrap() + .into(), + AttributeId::NodeClass => (NodeClass::Object as i32).into(), + AttributeId::BrowseName => { + QualifiedName::new(namespace.namespace_index, &namespace.namespace_uri).into() + } + AttributeId::DisplayName => LocalizedText::new("", &namespace.namespace_uri).into(), + AttributeId::EventNotifier => 0u8.into(), + AttributeId::WriteMask | AttributeId::UserWriteMask => 0u32.into(), + _ => { + node_to_read.set_error(StatusCode::BadAttributeIdInvalid); + return; + } + }; + + node_to_read.set_result(DataValue { + value: Some(v), + status: Some(StatusCode::Good), + source_timestamp: Some(start_time.clone()), + source_picoseconds: None, + server_timestamp: Some(start_time.clone()), + server_picoseconds: None, + }); + } + + fn read_namespace_property_node( + &self, + start_time: DateTime, + node_to_read: &mut ReadNode, + namespace: &NamespaceMetadata, + prop: &str, + ) { + if !matches!( + prop, + "DefaultAccessRestrictions" + | "DefaultRolePermissions" + | "DefaultUserRolePermissions" + | "IsNamespaceSubset" + | "NamespacePublicationDate" + | "NamespaceUri" + | "NamespaceVersion" + | "StaticNodeIdTypes" + | "StaticNumericNodeIdRange" + | "StaticStringNodeIdPattern" + ) { + node_to_read.set_error(StatusCode::BadNodeIdUnknown); + return; + } + + let v: Variant = match node_to_read.node().attribute_id { + AttributeId::NodeId => as_opaque_node_id( + &DiagnosticsNode::Namespace(NamespaceNode { + namespace: namespace.namespace_uri.clone(), + property: 
Some(prop.to_owned()), + }), + self.namespace_index, + ) + .unwrap() + .into(), + AttributeId::NodeClass => (NodeClass::Object as i32).into(), + AttributeId::BrowseName => QualifiedName::new(0, prop).into(), + AttributeId::DisplayName => LocalizedText::new("", prop).into(), + AttributeId::Value => match prop { + "DefaultAccessRestrictions" => namespace.default_access_restrictions.bits().into(), + "DefaultRolePermissions" => namespace + .default_role_permissions + .as_ref() + .map(|r| { + r.iter() + .map(|v| { + ExtensionObject::from_encodable( + ObjectId::RolePermissionType_Encoding_DefaultBinary, + v, + ) + }) + .collect::>() + }) + .into(), + "DefaultUserRolePermissions" => namespace + .default_user_role_permissions + .as_ref() + .map(|r| { + r.iter() + .map(|v| { + ExtensionObject::from_encodable( + ObjectId::RolePermissionType_Encoding_DefaultBinary, + v, + ) + }) + .collect::>() + }) + .into(), + "IsNamespaceSubset" => namespace.is_namespace_subset.into(), + "NamespacePublicationDate" => namespace.namespace_publication_date.into(), + "NamespaceUri" => namespace.namespace_uri.clone().into(), + "NamespaceVersion" => namespace.namespace_version.clone().into(), + "StaticNodeIdTypes" => namespace + .static_node_id_types + .as_ref() + .map(|r| r.iter().map(|v| (*v) as u8).collect::>()) + .into(), + "StaticNumericNodeIdRange" => namespace + .static_numeric_node_id_range + .as_ref() + .map(|r| r.iter().map(|v| v.as_string()).collect::>()) + .into(), + "StaticStringNodeIdPattern" => { + namespace.static_string_node_id_pattern.clone().into() + } + _ => { + node_to_read.set_error(StatusCode::BadNodeIdUnknown); + return; + } + }, + AttributeId::DataType => match prop { + "DefaultAccessRestrictions" => { + Variant::NodeId(Box::new(DataTypeId::AccessRestrictionType.into())) + } + "DefaultRolePermissions" | "DefaultUserRolePermissions" => { + Variant::NodeId(Box::new(DataTypeId::RolePermissionType.into())) + } + "IsNamespaceSubset" => 
Variant::NodeId(Box::new(DataTypeId::Boolean.into())), + "NamespacePublicationDate" => { + Variant::NodeId(Box::new(DataTypeId::DateTime.into())) + } + "NamespaceUri" | "NamespaceVersion" | "StaticStringNodeIdPattern" => { + Variant::NodeId(Box::new(DataTypeId::String.into())) + } + "StaticNodeIdTypes" => Variant::NodeId(Box::new(DataTypeId::IdType.into())), + "StaticNumericNodeIdRange" => { + Variant::NodeId(Box::new(DataTypeId::NumericRange.into())) + } + _ => { + node_to_read.set_error(StatusCode::BadNodeIdUnknown); + return; + } + }, + AttributeId::ValueRank => match prop { + "DefaultRolePermissions" | "DefaultUserRolePermissions" | "StaticNodeIdTypes" => { + 1.into() + } + _ => (-1).into(), + }, + AttributeId::ArrayDimensions => match prop { + "DefaultRolePermissions" | "DefaultUserRolePermissions" | "StaticNodeIdTypes" => { + vec![0u32].into() + } + _ => Variant::Empty, + }, + AttributeId::AccessLevel | AttributeId::UserAccessLevel => { + AccessLevel::CURRENT_READ.bits().into() + } + AttributeId::AccessLevelEx => AccessLevelExType::CurrentRead.bits().into(), + AttributeId::MinimumSamplingInterval => 0.0.into(), + AttributeId::Historizing => false.into(), + AttributeId::WriteMask | AttributeId::UserWriteMask => 0u32.into(), + _ => { + node_to_read.set_error(StatusCode::BadAttributeIdInvalid); + return; + } + }; + + node_to_read.set_result(DataValue { + value: Some(v), + status: Some(StatusCode::Good), + source_timestamp: Some(start_time.clone()), + source_picoseconds: None, + server_timestamp: Some(start_time.clone()), + server_picoseconds: None, + }); + } + + fn read_namespace_node( + &self, + start_time: DateTime, + node_to_read: &mut ReadNode, + namespaces: &BTreeMap, + ns_node: &NamespaceNode, + ) { + let Some(namespace) = namespaces.get(&ns_node.namespace) else { + node_to_read.set_error(StatusCode::BadNodeIdUnknown); + return; + }; + + if let Some(prop) = &ns_node.property { + self.read_namespace_property_node(start_time, node_to_read, namespace, prop); 
+ } else { + self.read_namespace_metadata_node(start_time, node_to_read, namespace); + } + } +} + +#[async_trait] +impl NodeManager for DiagnosticsNodeManager { + fn owns_node(&self, id: &NodeId) -> bool { + id.namespace == self.namespace_index + } + + fn name(&self) -> &str { + "diagnostics" + } + + fn namespaces_for_user(&self, context: &RequestContext) -> Vec { + vec![NamespaceMetadata { + namespace_uri: context.info.application_uri.as_ref().to_owned(), + is_namespace_subset: Some(false), + static_node_id_types: Some(vec![IdType::Opaque]), + namespace_index: self.namespace_index, + ..Default::default() + }] + } + + async fn init(&self, _type_tree: &mut TypeTree, context: ServerContext) { + let interval = context + .info + .config + .limits + .subscriptions + .min_sampling_interval_ms + .floor() as u64; + let sampler_interval = if interval > 0 { interval } else { 100 }; + self.sampler.run( + Duration::from_millis(sampler_interval), + context.subscriptions.clone(), + ); + } + + async fn browse( + &self, + context: &RequestContext, + nodes_to_browse: &mut [BrowseNode], + ) -> Result<(), StatusCode> { + let mut lazy_namespaces = None::>; + let type_tree = trace_read_lock!(context.type_tree); + + for node in nodes_to_browse { + if let Some(mut point) = node.take_continuation_point::() { + if node.remaining() <= 0 { + break; + } + let Some(ref_desc) = point.nodes.pop_back() else { + break; + }; + // Node is already filtered. 
+ node.add_unchecked(ref_desc); + continue; + } + + if node.node_id().namespace == 0 { + let namespaces = lazy_namespaces.get_or_insert_with(|| self.namespaces(context)); + let Ok(obj_id) = node.node_id().as_object_id() else { + continue; + }; + match obj_id { + ObjectId::Server_Namespaces => { + self.browse_namespaces(node, &type_tree, namespaces); + } + _ => continue, + } + } else if node.node_id().namespace == self.namespace_index { + let Some(node_desc) = from_opaque_node_id::(node.node_id()) else { + node.set_status(StatusCode::BadNodeIdUnknown); + continue; + }; + match node_desc { + DiagnosticsNode::Namespace(ns) => { + let namespaces = + lazy_namespaces.get_or_insert_with(|| self.namespaces(context)); + self.browse_namespace_node(node, &type_tree, namespaces, &ns); + } + } + } + } + + Ok(()) + } + + async fn read( + &self, + context: &RequestContext, + _max_age: f64, + _timestamps_to_return: TimestampsToReturn, + nodes_to_read: &mut [&mut ReadNode], + ) -> Result<(), StatusCode> { + let mut lazy_namespaces = None::>; + let start_time = **context.info.start_time.load(); + + for node in nodes_to_read { + let Some(node_desc) = from_opaque_node_id::(&node.node().node_id) + else { + node.set_error(StatusCode::BadNodeIdUnknown); + continue; + }; + match node_desc { + DiagnosticsNode::Namespace(ns) => { + let namespaces = + lazy_namespaces.get_or_insert_with(|| self.namespaces(context)); + self.read_namespace_node(start_time, node, namespaces, &ns); + } + } + } + Ok(()) + } +} diff --git a/lib/src/server/node_manager/memory/implementation.rs b/lib/src/server/node_manager/memory/implementation.rs new file mode 100644 index 000000000..ea6ffd59f --- /dev/null +++ b/lib/src/server/node_manager/memory/implementation.rs @@ -0,0 +1,358 @@ +use async_trait::async_trait; + +use crate::{ + server::{ + address_space::AddressSpace, + node_manager::{ + AddNodeItem, AddReferenceItem, DeleteNodeItem, DeleteReferenceItem, HistoryNode, + HistoryUpdateNode, MethodCall, 
MonitoredItemRef, MonitoredItemUpdateRef, + ParsedReadValueId, RegisterNodeItem, RequestContext, ServerContext, WriteNode, + }, + subscriptions::CreateMonitoredItem, + }, + sync::RwLock, + types::{ + DataValue, ExpandedNodeId, MonitoringMode, NodeId, ReadAnnotationDataDetails, + ReadAtTimeDetails, ReadEventDetails, ReadProcessedDetails, ReadRawModifiedDetails, + StatusCode, TimestampsToReturn, + }, +}; + +use super::NamespaceMetadata; + +pub trait InMemoryNodeManagerImplBuilder { + type Impl: InMemoryNodeManagerImpl; + + fn build(self, context: ServerContext, address_space: &mut AddressSpace) -> Self::Impl; +} + +impl InMemoryNodeManagerImplBuilder for T +where + T: FnOnce(ServerContext, &mut AddressSpace) -> R, +{ + type Impl = R; + + fn build(self, context: ServerContext, address_space: &mut AddressSpace) -> Self::Impl { + self(context, address_space) + } +} + +#[async_trait] +#[allow(unused)] +pub trait InMemoryNodeManagerImpl: Send + Sync + 'static { + /// Populate the address space. + async fn init(&self, address_space: &mut AddressSpace, context: ServerContext); + + /// Name of this node manager, for debug purposes. + fn name(&self) -> &str; + + fn namespaces(&self) -> Vec; + + /// Return whether this node should handle requests to create a node + /// for the given parent ID. This is only called if no new node ID is + /// requested, otherwise owns_node is called on the requested node ID. 
+ fn owns_server_events(&self) -> bool { + false + } + + fn handle_new_node(&self, parent_id: &ExpandedNodeId) -> bool { + false + } + + async fn register_nodes( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes: &mut [&mut RegisterNodeItem], + ) -> Result<(), StatusCode> { + for node in nodes { + node.set_registered(true); + } + + Ok(()) + } + + async fn read_values( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes: &[&ParsedReadValueId], + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> Vec { + let address_space = address_space.read(); + nodes + .iter() + .map(|n| address_space.read(context, n, max_age, timestamps_to_return)) + .collect() + } + + /// Create monitored items for the Value attribute, as needed. + /// This should, at the very least, read the current value of the nodes, + /// and set appropriate status on the monitored item request, see + /// default implementation. + /// + /// It may also begin sampling as given by the monitored item request. + async fn create_value_monitored_items( + &self, + context: &RequestContext, + address_space: &RwLock, + items: &mut [&mut &mut CreateMonitoredItem], + ) { + let to_read: Vec<_> = items.iter().map(|r| r.item_to_monitor()).collect(); + let values = self + .read_values( + context, + address_space, + &to_read, + 0.0, + TimestampsToReturn::Both, + ) + .await; + + for (value, node) in values.into_iter().zip(items.into_iter()) { + if value.status() != StatusCode::BadAttributeIdInvalid { + node.set_initial_value(value); + } + node.set_status(StatusCode::Good); + } + } + + /// Create monitored items for events. + /// + /// This does not need to do anything. + async fn create_event_monitored_items( + &self, + context: &RequestContext, + address_space: &RwLock, + items: &mut [&mut &mut CreateMonitoredItem], + ) { + // This is just a no-op by default. + } + + /// Handle the SetMonitoringMode request, to pause or resume sampling. 
+ /// + /// This will only get monitored items for events or value. + async fn set_monitoring_mode( + &self, + context: &RequestContext, + mode: MonitoringMode, + items: &[&MonitoredItemRef], + ) { + } + + /// Handle modification of monitored items, this may adjust + /// sampling intervals or filters, and require action to update background + /// processes. + async fn modify_monitored_items( + &self, + context: &RequestContext, + items: &[&MonitoredItemUpdateRef], + ) { + } + + /// Handle deletion of monitored items. + async fn delete_monitored_items(&self, context: &RequestContext, items: &[&MonitoredItemRef]) {} + + async fn unregister_nodes( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes: &[&NodeId], + ) -> Result<(), StatusCode> { + // Again, just do nothing + Ok(()) + } + + /// Perform the history read raw modified service. This should write results + /// to the `nodes` list of type either `HistoryData` or `HistoryModifiedData` + /// + /// Nodes are verified to be readable before this is called. + async fn history_read_raw_modified( + &self, + context: &RequestContext, + details: &ReadRawModifiedDetails, + nodes: &mut [&mut &mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read processed service. This should write results + /// to the `nodes` list of type `HistoryData`. + /// + /// Nodes are verified to be readable before this is called. + async fn history_read_processed( + &self, + context: &RequestContext, + details: &ReadProcessedDetails, + nodes: &mut [&mut &mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read processed service. This should write results + /// to the `nodes` list of type `HistoryData`. + /// + /// Nodes are verified to be readable before this is called. 
+ async fn history_read_at_time( + &self, + context: &RequestContext, + details: &ReadAtTimeDetails, + nodes: &mut [&mut &mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read events service. This should write results + /// to the `nodes` list of type `HistoryEvent`. + /// + /// Nodes are verified to be readable before this is called. + async fn history_read_events( + &self, + context: &RequestContext, + details: &ReadEventDetails, + nodes: &mut [&mut &mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read annotations data service. This should write + /// results to the `nodes` list of type `Annotation`. + /// + /// Nodes are verified to be readable before this is called. + async fn history_read_annotations( + &self, + context: &RequestContext, + details: &ReadAnnotationDataDetails, + nodes: &mut [&mut &mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the HistoryUpdate service. This should write result + /// status codes to the `nodes` list as appropriate. + /// + /// Nodes are verified to be writable before this is called. + async fn history_update( + &self, + context: &RequestContext, + nodes: &mut [&mut &mut HistoryUpdateNode], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the write service. This should write results + /// to the `nodes_to_write` list. The default result is `BadNodeIdUnknown` + /// + /// Writing is left almost entirely up to the node manager impl. If you do write + /// values you should call `context.subscriptions.notify_data_change` to trigger + /// any monitored items subscribed to the updated values. 
+ async fn write( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_write: &mut [&mut WriteNode], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Call a list of methods. + /// + /// The methods have already had their arguments verified to have valid length + /// and the method is verified to exist on the given object. This should try + /// to execute the methods, and set the result. + async fn call( + &self, + context: &RequestContext, + address_space: &RwLock, + methods_to_call: &mut [&mut &mut MethodCall], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Add a list of nodes. + /// + /// This should create the nodes, or set a failed status as appropriate. + /// If a node was created, the status should be set to Good. + async fn add_nodes( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_add: &mut [&mut AddNodeItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Add a list of references. + /// + /// This will be given all references where the source _or_ + /// target belongs to this node manager. A reference is + /// considered successfully added if either source_status + /// or target_status are Good. + /// + /// If you want to explicitly set the reference to failed, + /// set both source and target status. Note that it may + /// already have been added in a different node manager, you are + /// responsible for any cleanup if you do this. + async fn add_references( + &self, + context: &RequestContext, + address_space: &RwLock, + references_to_add: &mut [&mut AddReferenceItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Delete a list of nodes. + /// + /// This will be given all nodes that belong to this node manager. + /// + /// Typically, you also want to implement `delete_node_references` if + /// there are other node managers that support deletes. 
+ async fn delete_nodes( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_delete: &mut [&mut DeleteNodeItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Delete references for the given list of nodes. + /// The node manager should respect `delete_target_references`. + /// + /// This is not allowed to fail, you should make it impossible to delete + /// nodes with immutable references. + async fn delete_node_references( + &self, + context: &RequestContext, + address_space: &RwLock, + to_delete: &[&DeleteNodeItem], + ) { + } + + /// Delete a list of references. + /// + /// This will be given all references where the source _or_ + /// target belongs to this node manager. A reference is + /// considered successfully added if either source_status + /// or target_status are Good. + /// + /// If you want to explicitly set the reference to failed, + /// set both source and target status. Note that it may + /// already have been deleted in a different node manager, you are + /// responsible for any cleanup if you do this. 
+ async fn delete_references( + &self, + context: &RequestContext, + address_space: &RwLock, + references_to_delete: &mut [&mut DeleteReferenceItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } +} diff --git a/lib/src/server/node_manager/memory/mod.rs b/lib/src/server/node_manager/memory/mod.rs new file mode 100644 index 000000000..7a2cf64d3 --- /dev/null +++ b/lib/src/server/node_manager/memory/mod.rs @@ -0,0 +1,1079 @@ +mod core; +mod diagnostics; +mod implementation; +mod simple; + +pub use core::{CoreNodeManager, CoreNodeManagerBuilder, CoreNodeManagerImpl}; +pub use diagnostics::{DiagnosticsNodeManager, DiagnosticsNodeManagerBuilder, NamespaceMetadata}; +pub use implementation::*; +pub use simple::*; + +use std::{ + collections::{HashSet, VecDeque}, + sync::Arc, +}; + +use async_trait::async_trait; +use hashbrown::HashMap; + +use crate::{ + server::{ + address_space::{ + read_node_value, user_access_level, EventNotifier, NodeType, ReferenceDirection, + UserAccessLevel, + }, + subscriptions::CreateMonitoredItem, + SubscriptionCache, + }, + sync::RwLock, + types::{ + argument::Argument, AttributeId, BrowseDescriptionResultMask, BrowseDirection, DataValue, + DateTime, ExpandedNodeId, MonitoringMode, NodeClass, NodeId, NumericRange, QualifiedName, + ReadAnnotationDataDetails, ReadAtTimeDetails, ReadEventDetails, ReadProcessedDetails, + ReadRawModifiedDetails, ReferenceDescription, ReferenceTypeId, StatusCode, + TimestampsToReturn, Variant, + }, +}; + +use super::{ + build::NodeManagerBuilder, + view::{AddReferenceResult, ExternalReference, ExternalReferenceRequest, NodeMetadata}, + AddNodeItem, AddReferenceItem, BrowseNode, BrowsePathItem, DeleteNodeItem, DeleteReferenceItem, + DynNodeManager, HistoryNode, HistoryUpdateDetails, HistoryUpdateNode, MethodCall, + MonitoredItemRef, MonitoredItemUpdateRef, NodeManager, ReadNode, RegisterNodeItem, + RequestContext, ServerContext, TypeTree, WriteNode, +}; + +use 
crate::server::address_space::AddressSpace;

/// Continuation point used by `browse`, holding the references that did not
/// fit in a single response.
#[derive(Default)]
struct BrowseContinuationPoint {
    nodes: VecDeque<ReferenceDescription>,
}

/// Node manager that keeps its entire node hierarchy in memory, delegating
/// custom behavior (reads, writes, sampling, history, …) to an inner
/// [`InMemoryNodeManagerImpl`].
pub struct InMemoryNodeManager<TImpl: InMemoryNodeManagerImpl> {
    /// Shared, lockable address space holding all nodes owned by this manager.
    address_space: Arc<RwLock<AddressSpace>>,
    /// Map of namespace index to namespace URI, snapshotted at construction.
    namespaces: HashMap<u16, String>,
    inner: TImpl,
}

/// Builder producing an [`InMemoryNodeManager`] from a builder for its inner
/// implementation.
pub struct InMemoryNodeManagerBuilder<T> {
    impl_builder: T,
}

impl<T: InMemoryNodeManagerImplBuilder> InMemoryNodeManagerBuilder<T> {
    /// Wrap an implementation builder.
    pub fn new(impl_builder: T) -> Self {
        Self { impl_builder }
    }
}

impl<T: InMemoryNodeManagerImplBuilder> NodeManagerBuilder for InMemoryNodeManagerBuilder<T> {
    fn build(self: Box<Self>, context: ServerContext) -> Arc<DynNodeManager> {
        let mut address_space = AddressSpace::new();
        let inner = self.impl_builder.build(context, &mut address_space);
        Arc::new(InMemoryNodeManager::new(inner, address_space))
    }
}

impl<TImpl: InMemoryNodeManagerImpl> InMemoryNodeManager<TImpl> {
    pub(crate) fn new(inner: TImpl, address_space: AddressSpace) -> Self {
        Self {
            // Snapshot the namespaces before the address space is moved
            // behind the lock.
            namespaces: address_space.namespaces().clone(),
            address_space: Arc::new(RwLock::new(address_space)),
            inner,
        }
    }

    /// Borrow the inner node manager implementation.
    pub fn inner(&self) -> &TImpl {
        &self.inner
    }

    /// Borrow the shared address space.
    pub fn address_space(&self) -> &Arc<RwLock<AddressSpace>> {
        &self.address_space
    }

    /// Map of namespace index to namespace URI owned by this manager.
    pub fn namespaces(&self) -> &HashMap<u16, String> {
        &self.namespaces
    }

    /// Set a batch of attribute values, notifying any subscribed monitored
    /// items. Fails with `BadNodeIdUnknown` on the first missing node.
    pub fn set_attributes<'a>(
        &self,
        subscriptions: &SubscriptionCache,
        values: impl Iterator<Item = (&'a NodeId, AttributeId, Variant)>,
    ) -> Result<(), StatusCode> {
        let mut address_space = trace_write_lock!(self.address_space);
        let mut output = Vec::new();

        for (id, attribute_id, value) in values {
            let Some(node) = address_space.find_mut(id) else {
                return Err(StatusCode::BadNodeIdUnknown);
            };

            let node_mut = node.as_mut_node();
            node_mut.set_attribute(attribute_id, value)?;
            // Don't notify on changes to event notifier, subscribing to that
            // specific attribute means subscribing to events.
+ if attribute_id != AttributeId::EventNotifier { + output.push((id, attribute_id)); + } + } + + subscriptions.maybe_notify( + output.into_iter(), + |node_id, attribute_id, index_range, data_encoding| { + let Some(node) = address_space.find(node_id) else { + return None; + }; + let node_ref = node.as_node(); + + node_ref.get_attribute( + TimestampsToReturn::Both, + attribute_id, + index_range.clone(), + data_encoding, + ) + }, + ); + + Ok(()) + } + + pub fn set_attribute( + &self, + subscriptions: &SubscriptionCache, + id: &NodeId, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { + self.set_attributes(subscriptions, [(id, attribute_id, value)].into_iter()) + } + + pub fn set_values<'a>( + &self, + subscriptions: &SubscriptionCache, + values: impl Iterator, DataValue)>, + ) -> Result<(), StatusCode> { + let mut address_space = trace_write_lock!(self.address_space); + let now = DateTime::now(); + let mut output = Vec::new(); + + for (id, index_range, value) in values { + let Some(node) = address_space.find_mut(id) else { + return Err(StatusCode::BadNodeIdUnknown); + }; + + match node { + NodeType::Variable(v) => { + if let Some(range) = index_range { + let status = value.status(); + let source_timestamp = value.source_timestamp.unwrap_or(now); + let server_timestamp = value.server_timestamp.unwrap_or(now); + v.set_value_range( + value.value.unwrap_or_default(), + range, + status, + &server_timestamp, + &source_timestamp, + )? 
+ } else { + v.set_data_value(value) + } + } + NodeType::VariableType(v) => v.set_value(value.value.unwrap_or_default()), + _ => return Err(StatusCode::BadAttributeIdInvalid), + } + + output.push((id, AttributeId::Value)); + } + + subscriptions.maybe_notify( + output.into_iter(), + |node_id, attribute_id, index_range, data_encoding| { + let Some(node) = address_space.find(node_id) else { + return None; + }; + let node_ref = node.as_node(); + + node_ref.get_attribute( + TimestampsToReturn::Both, + attribute_id, + index_range.clone(), + data_encoding, + ) + }, + ); + + Ok(()) + } + + pub fn set_value( + &self, + subscriptions: &SubscriptionCache, + id: &NodeId, + index_range: Option, + value: DataValue, + ) -> Result<(), StatusCode> { + self.set_values(subscriptions, [(id, index_range, value)].into_iter()) + } + + fn get_reference<'a>( + address_space: &AddressSpace, + type_tree: &TypeTree, + target_node: &'a NodeType, + result_mask: BrowseDescriptionResultMask, + ) -> NodeMetadata { + let node_ref = target_node.as_node(); + + let target_node_id = node_ref.node_id().clone(); + + let type_definition = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_TYPE_DEFINITION) { + // Type definition NodeId of the TargetNode. Type definitions are only available + // for the NodeClasses Object and Variable. For all other NodeClasses a null NodeId + // shall be returned. 
+ match node_ref.node_class() { + NodeClass::Object | NodeClass::Variable => { + let mut type_defs = address_space.find_references( + &target_node_id, + Some((ReferenceTypeId::HasTypeDefinition, false)), + type_tree, + BrowseDirection::Forward, + ); + if let Some(type_def) = type_defs.next() { + ExpandedNodeId::new(type_def.target_node.clone()) + } else { + ExpandedNodeId::null() + } + } + _ => ExpandedNodeId::null(), + } + } else { + ExpandedNodeId::null() + }; + + NodeMetadata { + node_id: ExpandedNodeId::new(target_node_id), + browse_name: node_ref.browse_name().clone(), + display_name: node_ref.display_name().clone(), + node_class: node_ref.node_class(), + type_definition, + } + } + + /// Browses a single node, returns any external references found. + fn browse_node<'a>( + address_space: &'a AddressSpace, + type_tree: &TypeTree, + node: &mut BrowseNode, + namespaces: &hashbrown::HashMap, + ) { + let reference_type_id = if node.reference_type_id().is_null() { + None + } else if let Ok(reference_type_id) = node.reference_type_id().as_reference_type_id() { + Some((reference_type_id, node.include_subtypes())) + } else { + None + }; + + let mut cont_point = BrowseContinuationPoint::default(); + + let source_node_id = node.node_id().clone(); + + for reference in address_space.find_references( + &source_node_id, + reference_type_id, + type_tree, + node.browse_direction(), + ) { + if reference.target_node.is_null() { + warn!( + "Target node in reference from {} of type {} is null", + node.node_id(), + reference.reference_type + ); + continue; + } + let target_node = address_space.find_node(&reference.target_node); + let Some(target_node) = target_node else { + if namespaces.contains_key(&reference.target_node.namespace) { + warn!( + "Target node {} in reference from {} of type {} does not exist", + reference.target_node, + node.node_id(), + reference.reference_type + ); + } else { + node.push_external_reference(ExternalReference::new( + reference.target_node.into(), + 
reference.reference_type.clone(), + reference.direction, + )) + } + + continue; + }; + + let r_node = + Self::get_reference(address_space, type_tree, target_node, node.result_mask()); + + let ref_desc = ReferenceDescription { + reference_type_id: reference.reference_type.clone(), + is_forward: matches!(reference.direction, ReferenceDirection::Forward), + node_id: r_node.node_id, + browse_name: r_node.browse_name, + display_name: r_node.display_name, + node_class: r_node.node_class, + type_definition: r_node.type_definition, + }; + + if let AddReferenceResult::Full(c) = node.add(type_tree, ref_desc) { + cont_point.nodes.push_back(c); + } + } + + if !cont_point.nodes.is_empty() { + node.set_next_continuation_point(Box::new(cont_point)); + } + } + + fn translate_browse_paths( + address_space: &AddressSpace, + type_tree: &TypeTree, + context: &RequestContext, + namespaces: &hashbrown::HashMap, + item: &mut BrowsePathItem, + ) { + if let Some(name) = item.unmatched_browse_name() { + let is_full_match = address_space + .find_node(item.node_id()) + .is_some_and(|n| name.is_null() || n.as_node().browse_name() == name); + if !is_full_match { + return; + } else { + item.set_browse_name_matched(context.current_node_manager_index); + } + } + + let mut matching_nodes = HashSet::new(); + matching_nodes.insert(item.node_id()); + let mut next_matching_nodes = HashSet::new(); + let mut results = Vec::new(); + + let mut depth = 0; + for element in item.path() { + depth += 1; + for node_id in matching_nodes.drain() { + let reference_filter = { + if element.reference_type_id.is_null() { + None + } else { + Some((element.reference_type_id.clone(), element.include_subtypes)) + } + }; + + for rf in address_space.find_references( + &node_id, + reference_filter, + type_tree, + if element.is_inverse { + BrowseDirection::Inverse + } else { + BrowseDirection::Forward + }, + ) { + if !next_matching_nodes.contains(rf.target_node) { + let Some(node) = address_space.find_node(rf.target_node) else 
{ + if !namespaces.contains_key(&rf.target_node.namespace) { + results.push(( + rf.target_node, + depth, + Some(element.target_name.clone()), + )); + } + continue; + }; + + if element.target_name.is_null() + || node.as_node().browse_name() == &element.target_name + { + next_matching_nodes.insert(rf.target_node); + results.push((rf.target_node, depth, None)); + } + } + } + } + std::mem::swap(&mut matching_nodes, &mut next_matching_nodes); + } + + for res in results { + item.add_element(res.0.clone(), res.1, res.2); + } + } + + fn validate_history_read_nodes<'a, 'b>( + &self, + context: &RequestContext, + nodes: &'b mut [&'a mut HistoryNode], + is_for_events: bool, + ) -> Vec<&'b mut &'a mut HistoryNode> { + let address_space = trace_read_lock!(self.address_space); + let mut valid = Vec::with_capacity(nodes.len()); + + for history_node in nodes { + let Some(node) = address_space.find(history_node.node_id()) else { + history_node.set_status(StatusCode::BadNodeIdUnknown); + continue; + }; + + if is_for_events { + // TODO: History read for events should forward to a global callback + // for the server node. 
+ let NodeType::Object(object) = node else { + history_node.set_status(StatusCode::BadHistoryOperationUnsupported); + continue; + }; + + if !object + .event_notifier() + .contains(EventNotifier::HISTORY_READ) + { + history_node.set_status(StatusCode::BadHistoryOperationUnsupported); + continue; + } + } else { + let NodeType::Variable(_) = node else { + history_node.set_status(StatusCode::BadHistoryOperationUnsupported); + continue; + }; + + let user_access_level = user_access_level(context, node); + + if !user_access_level.contains(UserAccessLevel::HISTORY_READ) { + history_node.set_status(StatusCode::BadUserAccessDenied); + continue; + } + } + + valid.push(history_node); + } + + valid + } + + fn validate_history_write_nodes<'a, 'b>( + &self, + context: &RequestContext, + nodes: &'b mut [&'a mut HistoryUpdateNode], + ) -> Vec<&'b mut &'a mut HistoryUpdateNode> { + let address_space = trace_read_lock!(self.address_space); + let mut valid = Vec::with_capacity(nodes.len()); + + for history_node in nodes { + let Some(node) = address_space.find(history_node.details().node_id()) else { + history_node.set_status(StatusCode::BadNodeIdUnknown); + continue; + }; + + let is_for_events = matches!( + history_node.details(), + HistoryUpdateDetails::DeleteEvent(_) | HistoryUpdateDetails::UpdateEvent(_) + ); + + if is_for_events { + // TODO: History read for events should forward to a global callback + // for the server node. 
+ let NodeType::Object(object) = node else { + history_node.set_status(StatusCode::BadHistoryOperationUnsupported); + continue; + }; + + if !object + .event_notifier() + .contains(EventNotifier::HISTORY_WRITE) + { + history_node.set_status(StatusCode::BadHistoryOperationUnsupported); + continue; + } + } else { + let NodeType::Variable(_) = node else { + history_node.set_status(StatusCode::BadHistoryOperationUnsupported); + continue; + }; + + let user_access_level = user_access_level(context, node); + + if !user_access_level.contains(UserAccessLevel::HISTORY_WRITE) { + history_node.set_status(StatusCode::BadUserAccessDenied); + continue; + } + } + + valid.push(history_node); + } + + valid + } + + fn validate_method_calls<'a, 'b>( + &self, + context: &RequestContext, + methods: &'b mut [&'a mut MethodCall], + ) -> Vec<&'b mut &'a mut MethodCall> { + let address_space = trace_read_lock!(self.address_space); + let type_tree = trace_read_lock!(context.type_tree); + let mut valid = Vec::with_capacity(methods.len()); + + for method in methods { + let Some(method_ref) = address_space + .find_references( + method.object_id(), + Some((ReferenceTypeId::HasComponent, false)), + &type_tree, + BrowseDirection::Forward, + ) + .find(|r| r.target_node == method.method_id()) + else { + method.set_status(StatusCode::BadMethodInvalid); + continue; + }; + + let Some(NodeType::Method(method_node)) = address_space.find(method_ref.target_node) + else { + method.set_status(StatusCode::BadMethodInvalid); + continue; + }; + + if !method_node.user_executable() + || !context + .authenticator + .is_user_executable(&context.token, method.method_id()) + { + method.set_status(StatusCode::BadUserAccessDenied); + continue; + } + + let input_arguments = address_space.find_node_by_browse_name( + method.method_id(), + Some((ReferenceTypeId::HasProperty, false)), + &type_tree, + BrowseDirection::Forward, + "InputArguments", + ); + + // If there are no input arguments, it means the method takes no 
inputs. + let Some(input_arguments) = input_arguments else { + if method.arguments().is_empty() { + valid.push(method); + } else { + method.set_status(StatusCode::BadTooManyArguments); + } + continue; + }; + + // If the input arguments object is invalid, we pass it along anyway and leave it up to + // the implementation to validate. + let NodeType::Variable(arg_var) = input_arguments else { + warn!( + "InputArguments for method with ID {} has incorrect node class", + method.method_id() + ); + valid.push(method); + continue; + }; + + let Some(Variant::Array(input_arguments_value)) = arg_var + .value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0, + ) + .value + else { + warn!( + "InputArguments for method with ID {} has incorrect type", + method.method_id() + ); + valid.push(method); + continue; + }; + + let options = context.info.decoding_options(); + let num_args = input_arguments_value.values.len(); + let arguments: Vec<_> = input_arguments_value + .values + .into_iter() + .filter_map(|v| match v { + Variant::ExtensionObject(o) => o.decode_inner::(&options).ok(), + _ => None, + }) + .collect(); + if arguments.len() != num_args { + warn!( + "InputArguments for method with ID {} has invalid arguments", + method.method_id() + ); + valid.push(method); + continue; + }; + + if arguments.len() < method.arguments().len() { + method.set_status(StatusCode::BadTooManyArguments); + continue; + } + + valid.push(method); + } + + valid + } +} + +#[async_trait] +impl NodeManager for InMemoryNodeManager { + fn owns_node(&self, id: &NodeId) -> bool { + self.namespaces.contains_key(&id.namespace) + } + + fn name(&self) -> &str { + self.inner.name() + } + + async fn init(&self, type_tree: &mut TypeTree, context: ServerContext) { + let mut address_space = trace_write_lock!(self.address_space); + + self.inner.init(&mut address_space, context).await; + + address_space.load_into_type_tree(type_tree); + } + + fn namespaces_for_user(&self, _context: 
&RequestContext) -> Vec { + self.inner.namespaces() + } + + fn handle_new_node(&self, parent_id: &ExpandedNodeId) -> bool { + self.inner.handle_new_node(parent_id) + } + + async fn resolve_external_references( + &self, + context: &RequestContext, + items: &mut [&mut ExternalReferenceRequest], + ) { + let address_space = trace_read_lock!(self.address_space); + let type_tree = trace_read_lock!(context.type_tree); + + for item in items.into_iter() { + let target_node = address_space.find_node(&item.node_id()); + + let Some(target_node) = target_node else { + continue; + }; + + item.set(Self::get_reference( + &*address_space, + &*type_tree, + target_node, + item.result_mask(), + )); + } + } + + async fn browse( + &self, + context: &RequestContext, + nodes_to_browse: &mut [BrowseNode], + ) -> Result<(), StatusCode> { + let address_space = trace_read_lock!(self.address_space); + let type_tree = trace_read_lock!(context.type_tree); + + for node in nodes_to_browse.iter_mut() { + if node.node_id().is_null() { + continue; + } + + node.set_status(StatusCode::Good); + + if let Some(mut point) = node.take_continuation_point::() { + loop { + if node.remaining() <= 0 { + break; + } + let Some(ref_desc) = point.nodes.pop_back() else { + break; + }; + // Node is already filtered. 
+ node.add_unchecked(ref_desc); + } + if !point.nodes.is_empty() { + node.set_next_continuation_point(point); + } + } else { + Self::browse_node(&address_space, &*type_tree, node, &self.namespaces); + } + } + + Ok(()) + } + + async fn read( + &self, + context: &RequestContext, + max_age: f64, + timestamps_to_return: TimestampsToReturn, + nodes_to_read: &mut [&mut ReadNode], + ) -> Result<(), StatusCode> { + let mut read_values = Vec::new(); + { + let address_space = trace_read_lock!(self.address_space); + for node in nodes_to_read { + if node.node().attribute_id == AttributeId::Value { + read_values.push(node); + continue; + } + + node.set_result(address_space.read( + context, + &node.node(), + max_age, + timestamps_to_return, + )); + } + } + + if !read_values.is_empty() { + let ids: Vec<_> = read_values.iter().map(|r| r.node()).collect(); + let values = self + .inner + .read_values( + context, + &self.address_space, + &ids, + max_age, + timestamps_to_return, + ) + .await; + for (read, value) in read_values.iter_mut().zip(values) { + read.set_result(value); + } + } + + Ok(()) + } + + async fn translate_browse_paths_to_node_ids( + &self, + context: &RequestContext, + nodes: &mut [&mut BrowsePathItem], + ) -> Result<(), StatusCode> { + let address_space = trace_read_lock!(self.address_space); + let type_tree = trace_read_lock!(context.type_tree); + + for node in nodes { + Self::translate_browse_paths( + &*address_space, + &*type_tree, + context, + &self.namespaces, + node, + ); + } + + Ok(()) + } + + async fn register_nodes( + &self, + context: &RequestContext, + nodes: &mut [&mut RegisterNodeItem], + ) -> Result<(), StatusCode> { + self.inner + .register_nodes(context, &self.address_space, nodes) + .await + } + + async fn unregister_nodes( + &self, + context: &RequestContext, + nodes: &[&NodeId], + ) -> Result<(), StatusCode> { + self.inner + .unregister_nodes(context, &self.address_space, nodes) + .await + } + + async fn create_monitored_items( + &self, + context: 
&RequestContext, + items: &mut [&mut CreateMonitoredItem], + ) -> Result<(), StatusCode> { + let address_space = trace_read_lock!(self.address_space); + let mut value_items = Vec::new(); + let mut event_items = Vec::new(); + + for node in items { + if node.item_to_monitor().attribute_id == AttributeId::Value { + value_items.push(node); + continue; + } + + let n = match address_space.validate_node_read(context, node.item_to_monitor()) { + Ok(n) => n, + Err(e) => { + node.set_status(e); + continue; + } + }; + + let read_result = read_node_value( + n, + context, + node.item_to_monitor(), + 0.0, + node.timestamps_to_return(), + ); + + // Event monitored items are global, so all we need to do is to validate that the + // node allows subscribing to events. + if node.item_to_monitor().attribute_id == AttributeId::EventNotifier { + let Some(Variant::Byte(notifier)) = &read_result.value else { + node.set_status(StatusCode::BadAttributeIdInvalid); + continue; + }; + let notifier = EventNotifier::from_bits_truncate(*notifier); + if !notifier.contains(EventNotifier::SUBSCRIBE_TO_EVENTS) { + node.set_status(StatusCode::BadAttributeIdInvalid); + continue; + } + + // No further action beyond just validation. + node.set_status(StatusCode::Good); + event_items.push(node); + continue; + } + + // This specific status code here means that the value does not exist, so it is + // more appropriate to not set an initial value. 
+ if read_result.status() != StatusCode::BadAttributeIdInvalid { + node.set_initial_value(read_result); + } + + node.set_status(StatusCode::Good); + } + drop(address_space); + + if !value_items.is_empty() { + self.inner + .create_value_monitored_items(context, &self.address_space, &mut value_items) + .await; + } + + if !event_items.is_empty() { + self.inner + .create_event_monitored_items(context, &self.address_space, &mut event_items) + .await; + } + + Ok(()) + } + + async fn modify_monitored_items( + &self, + context: &RequestContext, + items: &[&MonitoredItemUpdateRef], + ) { + let items: Vec<_> = items + .iter() + .filter(|it| { + matches!( + it.attribute(), + AttributeId::Value | AttributeId::EventNotifier + ) + }) + .copied() + .collect(); + self.inner.modify_monitored_items(context, &items).await; + } + + async fn set_monitoring_mode( + &self, + context: &RequestContext, + mode: MonitoringMode, + items: &[&MonitoredItemRef], + ) { + let items: Vec<_> = items + .iter() + .filter(|it| { + matches!( + it.attribute(), + AttributeId::Value | AttributeId::EventNotifier + ) + }) + .copied() + .collect(); + self.inner.set_monitoring_mode(context, mode, &items).await; + } + + async fn delete_monitored_items(&self, context: &RequestContext, items: &[&MonitoredItemRef]) { + let items: Vec<_> = items + .iter() + .filter(|it| { + matches!( + it.attribute(), + AttributeId::Value | AttributeId::EventNotifier + ) + }) + .copied() + .collect(); + self.inner.delete_monitored_items(context, &items).await; + } + + async fn history_read_raw_modified( + &self, + context: &RequestContext, + details: &ReadRawModifiedDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + let mut nodes = self.validate_history_read_nodes(context, nodes, false); + self.inner + .history_read_raw_modified(context, details, &mut nodes, timestamps_to_return) + .await + } + + async fn history_read_processed( + &self, + context: 
&RequestContext, + details: &ReadProcessedDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + let mut nodes = self.validate_history_read_nodes(context, nodes, false); + self.inner + .history_read_processed(context, details, &mut nodes, timestamps_to_return) + .await + } + + async fn history_read_at_time( + &self, + context: &RequestContext, + details: &ReadAtTimeDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + let mut nodes = self.validate_history_read_nodes(context, nodes, false); + self.inner + .history_read_at_time(context, details, &mut nodes, timestamps_to_return) + .await + } + + async fn history_read_events( + &self, + context: &RequestContext, + details: &ReadEventDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + let mut nodes = self.validate_history_read_nodes(context, nodes, false); + self.inner + .history_read_events(context, details, &mut nodes, timestamps_to_return) + .await + } + + async fn history_read_annotations( + &self, + context: &RequestContext, + details: &ReadAnnotationDataDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + let mut nodes = self.validate_history_read_nodes(context, nodes, false); + self.inner + .history_read_annotations(context, details, &mut nodes, timestamps_to_return) + .await + } + + async fn write( + &self, + context: &RequestContext, + nodes_to_write: &mut [&mut WriteNode], + ) -> Result<(), StatusCode> { + self.inner + .write(context, &self.address_space, nodes_to_write) + .await + } + + async fn history_update( + &self, + context: &RequestContext, + nodes: &mut [&mut HistoryUpdateNode], + ) -> Result<(), StatusCode> { + let mut nodes = self.validate_history_write_nodes(context, nodes); + self.inner.history_update(context, &mut nodes).await + } 
+ + async fn call( + &self, + context: &RequestContext, + methods_to_call: &mut [&mut MethodCall], + ) -> Result<(), StatusCode> { + let mut to_call = self.validate_method_calls(context, methods_to_call); + self.inner + .call(context, &self.address_space, &mut to_call) + .await + } + + /// Add a list of nodes. + /// + /// This should create the nodes, or set a failed status as appropriate. + /// If a node was created, the status should be set to Good. + async fn add_nodes( + &self, + context: &RequestContext, + nodes_to_add: &mut [&mut AddNodeItem], + ) -> Result<(), StatusCode> { + self.inner + .add_nodes(context, &self.address_space, nodes_to_add) + .await + } + + async fn add_references( + &self, + context: &RequestContext, + references_to_add: &mut [&mut AddReferenceItem], + ) -> Result<(), StatusCode> { + self.inner + .add_references(context, &self.address_space, references_to_add) + .await + } + + async fn delete_nodes( + &self, + context: &RequestContext, + nodes_to_delete: &mut [&mut DeleteNodeItem], + ) -> Result<(), StatusCode> { + self.inner + .delete_nodes(context, &self.address_space, nodes_to_delete) + .await + } + + async fn delete_node_references( + &self, + context: &RequestContext, + to_delete: &[&DeleteNodeItem], + ) { + self.inner + .delete_node_references(context, &self.address_space, to_delete) + .await + } + + async fn delete_references( + &self, + context: &RequestContext, + references_to_delete: &mut [&mut DeleteReferenceItem], + ) -> Result<(), StatusCode> { + self.inner + .delete_references(context, &self.address_space, references_to_delete) + .await + } +} diff --git a/lib/src/server/node_manager/memory/simple.rs b/lib/src/server/node_manager/memory/simple.rs new file mode 100644 index 000000000..9df57f134 --- /dev/null +++ b/lib/src/server/node_manager/memory/simple.rs @@ -0,0 +1,384 @@ +use std::{collections::HashMap, sync::Arc, time::Duration}; + +use async_trait::async_trait; + +use crate::{ + server::{ + 
address_space::{read_node_value, AddressSpace, NodeBase, NodeType}, + node_manager::{ + MethodCall, MonitoredItemRef, MonitoredItemUpdateRef, NodeManagerBuilder, + NodeManagersRef, ParsedReadValueId, RequestContext, ServerContext, SyncSampler, + TypeTree, WriteNode, + }, + CreateMonitoredItem, + }, + sync::RwLock, + types::{ + AttributeId, DataValue, MonitoringMode, NodeId, NumericRange, StatusCode, + TimestampsToReturn, Variant, + }, +}; + +use super::{ + InMemoryNodeManager, InMemoryNodeManagerBuilder, InMemoryNodeManagerImpl, + InMemoryNodeManagerImplBuilder, NamespaceMetadata, +}; + +pub type SimpleNodeManager = InMemoryNodeManager; + +type WriteCB = Arc StatusCode + Send + Sync + 'static>; +type ReadCB = Arc< + dyn Fn(NumericRange, TimestampsToReturn, f64) -> Result + + Send + + Sync + + 'static, +>; +type MethodCB = Arc Result, StatusCode> + Send + Sync + 'static>; + +pub struct SimpleNodeManagerBuilder { + namespace: NamespaceMetadata, + name: String, +} + +impl SimpleNodeManagerBuilder { + pub fn new(namespace: NamespaceMetadata, name: &str) -> Self { + Self { + namespace, + name: name.to_owned(), + } + } +} + +impl InMemoryNodeManagerImplBuilder for SimpleNodeManagerBuilder { + type Impl = SimpleNodeManagerImpl; + + fn build(mut self, context: ServerContext, address_space: &mut AddressSpace) -> Self::Impl { + { + let mut type_tree = context.type_tree.write(); + self.namespace.namespace_index = type_tree + .namespaces_mut() + .add_namespace(&self.namespace.namespace_uri); + } + address_space.add_namespace( + &self.namespace.namespace_uri, + self.namespace.namespace_index, + ); + SimpleNodeManagerImpl::new(self.namespace, &self.name, context.node_managers.clone()) + } +} + +pub fn simple_node_manager(namespace: NamespaceMetadata, name: &str) -> impl NodeManagerBuilder { + InMemoryNodeManagerBuilder::new(SimpleNodeManagerBuilder::new(namespace, name)) +} + +/// Node manager designed to deal with simple, entirely in-memory, synchronous OPC-UA servers. 
+/// +/// Use this if +/// +/// - Your node hierarchy is known and small enough to fit in memory. +/// - No read, write, or method call operations are async or particularly time consuming. +/// - and you don't need to be able to write attributes other than `Value`. +pub struct SimpleNodeManagerImpl { + write_cbs: RwLock>, + read_cbs: RwLock>, + method_cbs: RwLock>, + namespace: NamespaceMetadata, + #[allow(unused)] + node_managers: NodeManagersRef, + name: String, + samplers: SyncSampler, +} + +#[async_trait] +impl InMemoryNodeManagerImpl for SimpleNodeManagerImpl { + async fn init(&self, _address_space: &mut AddressSpace, context: ServerContext) { + self.samplers.run( + Duration::from_millis( + context + .info + .config + .limits + .subscriptions + .min_sampling_interval_ms as u64, + ), + context.subscriptions.clone(), + ); + } + + fn namespaces(&self) -> Vec { + vec![self.namespace.clone()] + } + + fn name(&self) -> &str { + &self.name + } + + async fn read_values( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes: &[&ParsedReadValueId], + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> Vec { + let address_space = address_space.read(); + let cbs = trace_read_lock!(self.read_cbs); + + nodes + .iter() + .map(|n| { + self.read_node_value( + &*cbs, + context, + &address_space, + n, + max_age, + timestamps_to_return, + ) + }) + .collect() + } + + async fn create_value_monitored_items( + &self, + context: &RequestContext, + address_space: &RwLock, + items: &mut [&mut &mut CreateMonitoredItem], + ) { + let cbs = trace_read_lock!(self.read_cbs); + + let to_read: Vec<_> = items.iter().map(|r| r.item_to_monitor()).collect(); + let values = self + .read_values( + context, + address_space, + &to_read, + 0.0, + TimestampsToReturn::Both, + ) + .await; + + for (value, node) in values.into_iter().zip(items.into_iter()) { + if value.status() != StatusCode::BadAttributeIdInvalid { + node.set_initial_value(value); + } + 
node.set_status(StatusCode::Good); + let rf = &node.item_to_monitor().node_id; + + if let Some(cb) = cbs.get(rf).cloned() { + let tss = node.timestamps_to_return(); + let index_range = node.item_to_monitor().index_range.clone(); + + self.samplers.add_sampler( + node.item_to_monitor().node_id.clone(), + AttributeId::Value, + move || { + Some( + // TODO: Make everything take index range by reference. + match cb(index_range.clone(), tss, 0.0) { + Err(e) => DataValue { + status: Some(e), + ..Default::default() + }, + Ok(v) => v, + }, + ) + }, + node.monitoring_mode(), + node.handle(), + Duration::from_millis(node.sampling_interval() as u64), + ) + } + } + } + + async fn modify_monitored_items( + &self, + _context: &RequestContext, + items: &[&MonitoredItemUpdateRef], + ) { + for it in items { + self.samplers.update_sampler( + it.node_id(), + it.attribute(), + it.handle(), + Duration::from_millis(it.update().revised_sampling_interval as u64), + ); + } + } + + async fn set_monitoring_mode( + &self, + _context: &RequestContext, + mode: MonitoringMode, + items: &[&MonitoredItemRef], + ) { + for it in items { + self.samplers + .set_sampler_mode(it.node_id(), it.attribute(), it.handle(), mode); + } + } + + async fn delete_monitored_items(&self, _context: &RequestContext, items: &[&MonitoredItemRef]) { + for it in items { + self.samplers + .remove_sampler(it.node_id(), it.attribute(), it.handle()); + } + } + + async fn write( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_write: &mut [&mut WriteNode], + ) -> Result<(), StatusCode> { + let mut address_space = trace_write_lock!(address_space); + let type_tree = trace_read_lock!(context.type_tree); + let cbs = trace_read_lock!(self.write_cbs); + + for write in nodes_to_write { + self.write_node_value(&*cbs, context, &mut address_space, &type_tree, *write); + } + + Ok(()) + } + + async fn call( + &self, + _context: &RequestContext, + _address_space: &RwLock, + methods_to_call: &mut [&mut &mut 
MethodCall], + ) -> Result<(), StatusCode> { + let cbs = trace_read_lock!(self.method_cbs); + for method in methods_to_call { + if let Some(cb) = cbs.get(method.method_id()) { + match cb(method.arguments()) { + Ok(r) => { + method.set_outputs(r); + method.set_status(StatusCode::Good); + } + Err(e) => method.set_status(e), + } + } + } + + Ok(()) + } +} + +impl SimpleNodeManagerImpl { + pub fn new(namespace: NamespaceMetadata, name: &str, node_managers: NodeManagersRef) -> Self { + Self { + write_cbs: Default::default(), + read_cbs: Default::default(), + method_cbs: Default::default(), + namespace, + name: name.to_owned(), + node_managers, + samplers: SyncSampler::new(), + } + } + + fn read_node_value( + &self, + cbs: &HashMap, + context: &RequestContext, + address_space: &AddressSpace, + node_to_read: &ParsedReadValueId, + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> DataValue { + let mut result_value = DataValue::null(); + // Check that the read is permitted. + let node = match address_space.validate_node_read(context, node_to_read) { + Ok(n) => n, + Err(e) => { + result_value.status = Some(e); + return result_value; + } + }; + + // If there is a callback registered, call that, otherwise read it from the node hierarchy. + if let Some(cb) = cbs.get(&node_to_read.node_id) { + match cb( + node_to_read.index_range.clone(), + timestamps_to_return, + max_age, + ) { + Err(e) => { + return DataValue { + status: Some(e), + ..Default::default() + } + } + Ok(v) => v, + } + } else { + // If it can't be found, read it from the node hierarchy. 
+ read_node_value(node, context, node_to_read, max_age, timestamps_to_return) + } + } + + fn write_node_value( + &self, + cbs: &HashMap, + context: &RequestContext, + address_space: &mut AddressSpace, + type_tree: &TypeTree, + write: &mut WriteNode, + ) { + let node = match address_space.validate_node_write(context, write.value(), &type_tree) { + Ok(v) => v, + Err(e) => { + write.set_status(e); + return; + } + }; + + let (NodeType::Variable(var), AttributeId::Value) = (node, write.value().attribute_id) + else { + write.set_status(StatusCode::BadNotWritable); + return; + }; + + let Some(cb) = cbs.get(var.node_id()) else { + write.set_status(StatusCode::BadNotWritable); + return; + }; + + write.set_status(cb( + write.value().value.clone(), + write.value().index_range.clone(), + )); + } + + pub fn add_write_callback( + &self, + id: NodeId, + cb: impl Fn(DataValue, NumericRange) -> StatusCode + Send + Sync + 'static, + ) { + let mut cbs = trace_write_lock!(self.write_cbs); + cbs.insert(id, Arc::new(cb)); + } + + pub fn add_read_callback( + &self, + id: NodeId, + cb: impl Fn(NumericRange, TimestampsToReturn, f64) -> Result + + Send + + Sync + + 'static, + ) { + let mut cbs = trace_write_lock!(self.read_cbs); + cbs.insert(id, Arc::new(cb)); + } + + pub fn add_method_callback( + &self, + id: NodeId, + cb: impl Fn(&[Variant]) -> Result, StatusCode> + Send + Sync + 'static, + ) { + let mut cbs = trace_write_lock!(self.method_cbs); + cbs.insert(id, Arc::new(cb)); + } +} diff --git a/lib/src/server/node_manager/method.rs b/lib/src/server/node_manager/method.rs new file mode 100644 index 000000000..b48939ed8 --- /dev/null +++ b/lib/src/server/node_manager/method.rs @@ -0,0 +1,78 @@ +use crate::types::{CallMethodRequest, CallMethodResult, NodeId, StatusCode, Variant}; + +#[derive(Debug)] +/// Container for a single method call in a `Call` service call. 
+pub struct MethodCall { + object_id: NodeId, + method_id: NodeId, + arguments: Vec, + + status: StatusCode, + argument_results: Vec, + outputs: Vec, +} + +impl MethodCall { + pub(crate) fn new(request: CallMethodRequest) -> Self { + Self { + object_id: request.object_id, + method_id: request.method_id, + arguments: request.input_arguments.unwrap_or_default(), + status: StatusCode::BadMethodInvalid, + argument_results: Vec::new(), + outputs: Vec::new(), + } + } + + /// Set the argument results to a list of errors. + /// This will update the `status` to `BadInvalidArgument`. + /// + /// The length of `argument_results` must be equal to the length of `arguments`. + pub fn set_argument_error(&mut self, argument_results: Vec) { + self.argument_results = argument_results; + self.status = StatusCode::BadInvalidArgument; + } + + /// Set the result of this method call. + pub fn set_status(&mut self, status: StatusCode) { + self.status = status; + } + + /// Set the outputs of this method call. + pub fn set_outputs(&mut self, outputs: Vec) { + self.outputs = outputs; + } + + /// Get the arguments to this method call. + pub fn arguments(&self) -> &[Variant] { + &self.arguments + } + + /// Get the ID of the method to call. + pub fn method_id(&self) -> &NodeId { + &self.method_id + } + + /// Get the ID of the object the method is a part of. + pub fn object_id(&self) -> &NodeId { + &self.object_id + } + + /// Get the current status. 
+ pub fn status(&self) -> StatusCode { + self.status + } + + pub(crate) fn into_result(self) -> CallMethodResult { + CallMethodResult { + status_code: self.status, + input_argument_diagnostic_infos: None, + input_argument_results: if !self.argument_results.is_empty() { + Some(self.argument_results) + } else { + None + }, + output_arguments: Some(self.outputs), + } + } +} diff --git a/lib/src/server/node_manager/mod.rs b/lib/src/server/node_manager/mod.rs new file mode 100644 index 000000000..2f9902437 --- /dev/null +++ b/lib/src/server/node_manager/mod.rs @@ -0,0 +1,622 @@ +use std::{ + any::{Any, TypeId}, + ops::Index, + sync::{Arc, Weak}, +}; + +use crate::{ + sync::RwLock, + types::{ + ExpandedNodeId, MonitoringMode, NodeId, ReadAnnotationDataDetails, ReadAtTimeDetails, + ReadEventDetails, ReadProcessedDetails, ReadRawModifiedDetails, StatusCode, + TimestampsToReturn, + }, +}; +use async_trait::async_trait; +use memory::NamespaceMetadata; +use tokio::sync::OnceCell; + +mod attributes; +mod build; +mod context; +mod history; +pub mod memory; +mod method; +mod monitored_items; +mod node_management; +mod query; +mod type_tree; +mod utils; +mod view; + +use self::view::ExternalReferenceRequest; + +use super::{ + authenticator::AuthManager, info::ServerInfo, subscriptions::CreateMonitoredItem, + SubscriptionCache, +}; + +pub use { + attributes::{ParsedReadValueId, ParsedWriteValue, ReadNode, WriteNode}, + build::{add_namespaces, NamespaceMap, NodeManagerBuilder}, + context::RequestContext, + history::{HistoryNode, HistoryResult, HistoryUpdateDetails, HistoryUpdateNode}, + method::MethodCall, + monitored_items::{MonitoredItemRef, MonitoredItemUpdateRef}, + node_management::{AddNodeItem, AddReferenceItem, DeleteNodeItem, DeleteReferenceItem}, + query::{ParsedNodeTypeDescription, ParsedQueryDataDescription, QueryRequest}, + type_tree::{TypePropertyInverseRef, TypeTree, TypeTreeNode}, + utils::*, + view::{BrowseNode, BrowsePathItem, RegisterNodeItem}, +}; + +pub(crate) 
use context::resolve_external_references; +pub(crate) use history::HistoryReadDetails; +pub(crate) use query::QueryContinuationPoint; +pub(crate) use view::{BrowseContinuationPoint, ExternalReferencesContPoint}; + +/// Trait for a collection of node managers, to allow abstracting over +/// weak or strong references to the node manager collection. +pub trait NodeManagerCollection { + /// Iterate over the node managers on the server. + fn iter_node_managers(&self) -> impl Iterator>; +} + +/// Type alias for a dyn reference to a node manager. +pub type DynNodeManager = dyn NodeManager + Send + Sync + 'static; + +#[derive(Clone)] +/// Wrapper around the server managed list of node managers. +pub struct NodeManagers { + node_managers: Arc>>, +} + +impl NodeManagerCollection for NodeManagers { + fn iter_node_managers(&self) -> impl Iterator> { + self.iter().cloned() + } +} + +impl NodeManagers { + /// Iterate by reference over the node managers. + pub fn iter<'a>(&'a self) -> impl Iterator> { + (&self).into_iter() + } + + /// Get the length of the node manager collection. + pub fn len(&self) -> usize { + self.node_managers.len() + } + + /// Return `true` if the node manager collection is empty. + pub fn is_empty(&self) -> bool { + self.node_managers.is_empty() + } + + /// Create a new node manager collection from a vector of node managers. + pub fn new(node_managers: Vec>) -> Self { + Self { + node_managers: Arc::new(node_managers), + } + } + + /// Get a node manager by index. + pub fn get(&self, index: usize) -> Option<&Arc> { + self.node_managers.get(index) + } + + /// Get the first node manager with the specified type. + pub fn get_of_type(&self) -> Option> { + for m in self { + let r = &**m; + if r.type_id() == TypeId::of::() { + if let Some(k) = m.clone().into_any_arc().downcast().ok() { + return Some(k); + } + } + } + + None + } + + /// Get the first node manager with the specified name and try to cast it to the type `T`. 
+ /// + /// If there are multiple node managers with the same name, only the first will ever + /// be returned by this. Avoid having duplicate node managers. + pub fn get_by_name(&self, name: &str) -> Option> { + for m in self { + let r = &**m; + if r.name() == name { + return m.clone().into_any_arc().downcast().ok(); + } + } + None + } + + /// Create a weak reference to the node managers. + /// A node manager should avoid holding a copy of the `NodeManagers` object since that + /// results in a circular reference which will leak memory once dropped. + /// (This does not really matter if you don't care about memory leaks when the server is dropped.) + pub fn as_weak(&self) -> NodeManagersRef { + let weak = Arc::downgrade(&self.node_managers); + NodeManagersRef { + node_managers: Arc::new(OnceCell::new_with(Some(weak))), + } + } +} + +impl Index for NodeManagers { + type Output = Arc; + + fn index(&self, index: usize) -> &Self::Output { + &self.node_managers[index] + } +} + +impl<'a> IntoIterator for &'a NodeManagers { + type Item = &'a Arc; + + type IntoIter = <&'a Vec> as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.node_managers.iter() + } +} + +#[derive(Clone)] +/// A weak reference to the node manager collection. +pub struct NodeManagersRef { + /// This complex structure is here because node managers need to be able to store a reference + /// to a _future_ weak reference to the node managers. + node_managers: Arc>>>>, +} + +impl NodeManagerCollection for NodeManagersRef { + fn iter_node_managers(&self) -> impl Iterator> { + self.iter() + } +} + +impl NodeManagersRef { + pub(crate) fn new_empty() -> Self { + Self { + node_managers: Default::default(), + } + } + + pub(crate) fn init_from_node_managers(&self, node_managers: NodeManagers) { + self.node_managers + .set(Arc::downgrade(&node_managers.node_managers)) + .expect("Node manager ref initialized more than once"); + } + + /// Upgrade this node manager ref. 
Note that node managers should avoid keeping + /// a permanent copy of the NodeManagers struct, to avoid circular references leading + /// to a memory leak when the server is dropped. + /// + /// If this fails, it means that the server is dropped, so feel free to abort anything going on. + pub fn upgrade(&self) -> Option { + let node_managers = self.node_managers.get()?.upgrade()?; + Some(NodeManagers { node_managers }) + } + + /// Iterate over node managers. If the server is dropped this iterator will be _empty_. + pub fn iter<'a>(&'a self) -> impl Iterator> { + let node_managers = self.upgrade(); + let len = node_managers.as_ref().map(|l| l.len()).unwrap_or_default(); + (0..len).filter_map(move |i| node_managers.as_ref().map(move |r| r[i].clone())) + } + + /// Get the first node manager with the specified type. + pub fn get_of_type(&self) -> Option> { + self.upgrade().and_then(|m| m.get_of_type()) + } + + /// Get the first node manager with the specified name and try to cast it to the type `T`. + /// + /// If there are multiple node managers with the same name, only the first will ever + /// be returned by this. Avoid having duplicate node managers. + pub fn get_by_name(&self, name: &str) -> Option> { + self.upgrade().and_then(|m| m.get_by_name(name)) + } + + /// Get the node manager at the specified index. + pub fn get(&self, index: usize) -> Option> { + self.upgrade().and_then(|m| m.get(index).cloned()) + } +} + +#[derive(Clone)] +/// General server context, passed when requests are made to the node managers on +/// behalf of the server itself, and not a user. +pub struct ServerContext { + /// Weak reference to the node manager collection. + pub node_managers: NodeManagersRef, + /// Cache containing the subscriptions managed by the server. + pub subscriptions: Arc, + /// General server state and configuration. + pub info: Arc, + /// Global authenticator object. 
+ pub authenticator: Arc, + pub type_tree: Arc>, +} + +/// This trait is a workaround for the lack of +/// dyn upcasting coercion. +pub trait IntoAnyArc { + fn into_any_arc(self: Arc) -> Arc; +} + +impl IntoAnyArc for T { + fn into_any_arc(self: Arc) -> Arc { + self + } +} + +/// Trait for a type that implements logic for responding to requests. +/// Implementations of this trait may make external calls for node information, +/// or do other complex tasks. +/// +/// Note that each request is passed to every node manager concurrently. +/// It is up to each node manager to avoid responding to requests for nodes +/// managed by a different node manager. +/// +/// Requests are spawned on the tokio thread pool. Avoid making blocking calls in +/// methods on this trait. If you need to do blocking work use `tokio::spawn_blocking`, +/// though you should use async IO as much as possible. +/// +/// For a simpler interface see InMemoryNodeManager, use this trait directly +/// if you need to control how all node information is stored. +#[allow(unused_variables)] +#[async_trait] +pub trait NodeManager: IntoAnyArc + Any { + /// Return whether this node manager owns the given node, this is used for + /// propagating service-level errors. + /// + /// If a service returns an error, all nodes it owns will get that error, + /// even if this is a cross node-manager request like Browse. + fn owns_node(&self, id: &NodeId) -> bool; + + /// Name of this node manager, for debug purposes. + fn name(&self) -> &str; + + /// Return whether this node manager owns events on the server. + /// The first node manager that returns true here will be called when + /// reading or updating historical server events. + fn owns_server_events(&self) -> bool { + false + } + + /// Return whether this node should handle requests to create a node + /// for the given parent ID. This is only called if no new node ID is + /// requested, otherwise owns_node is called on the requested node ID. 
+ /// + /// Returning true here doesn't mean that creating the new node must + /// succeed, only that _if_ the parent node exists, this node manager + /// would be the one to create the requested node. + fn handle_new_node(&self, parent_id: &ExpandedNodeId) -> bool { + false + } + + /// Namespaces for a given user, used to populate the namespace array. + /// This being a method allows different users to see different namespaces. + fn namespaces_for_user(&self, context: &RequestContext) -> Vec; + + /// Perform any necessary loading of nodes, should populate the type tree if + /// needed. + async fn init(&self, type_tree: &mut TypeTree, context: ServerContext); + + /// Resolve a list of references given by a different node manager. + async fn resolve_external_references( + &self, + context: &RequestContext, + items: &mut [&mut ExternalReferenceRequest], + ) { + } + + // ATTRIBUTES + /// Execute the Read service. This should set results on the given nodes_to_read as needed. + async fn read( + &self, + context: &RequestContext, + max_age: f64, + timestamps_to_return: TimestampsToReturn, + nodes_to_read: &mut [&mut ReadNode], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Perform the history read raw modified service. This should write results + /// to the `nodes` list of type either `HistoryData` or `HistoryModifiedData` + async fn history_read_raw_modified( + &self, + context: &RequestContext, + details: &ReadRawModifiedDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read processed service. This should write results + /// to the `nodes` list of type `HistoryData`. 
+ async fn history_read_processed( + &self, + context: &RequestContext, + details: &ReadProcessedDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read at time service. This should write results + /// to the `nodes` list of type `HistoryData`. + async fn history_read_at_time( + &self, + context: &RequestContext, + details: &ReadAtTimeDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read events service. This should write results + /// to the `nodes` list of type `HistoryEvent`. + async fn history_read_events( + &self, + context: &RequestContext, + details: &ReadEventDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the history read annotations data service. This should write + /// results to the `nodes` list of type `Annotation`. + async fn history_read_annotations( + &self, + context: &RequestContext, + details: &ReadAnnotationDataDetails, + nodes: &mut [&mut HistoryNode], + timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + /// Perform the write service. This should write results + /// to the `nodes_to_write` list. The default result is `BadNodeIdUnknown` + async fn write( + &self, + context: &RequestContext, + nodes_to_write: &mut [&mut WriteNode], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Perform the HistoryUpdate service. This should write result + /// status codes to the `nodes` list as appropriate. 
+ async fn history_update( + &self, + context: &RequestContext, + nodes: &mut [&mut HistoryUpdateNode], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadHistoryOperationUnsupported) + } + + // VIEW + /// Perform the Browse or BrowseNext service. + async fn browse( + &self, + context: &RequestContext, + nodes_to_browse: &mut [BrowseNode], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Perform the translate browse paths to node IDs service. + async fn translate_browse_paths_to_node_ids( + &self, + context: &RequestContext, + nodes: &mut [&mut BrowsePathItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Perform the register nodes service. The default behavior for this service is to + /// do nothing and pretend the nodes were registered. + async fn register_nodes( + &self, + context: &RequestContext, + nodes: &mut [&mut RegisterNodeItem], + ) -> Result<(), StatusCode> { + // Most servers don't actually do anything with node registration, it is reasonable + // to just pretend the nodes are registered. + for node in nodes { + node.set_registered(true); + } + + Ok(()) + } + + /// Perform the unregister nodes service. The default behavior for this service is to + /// do nothing. + async fn unregister_nodes( + &self, + context: &RequestContext, + _nodes: &[&NodeId], + ) -> Result<(), StatusCode> { + // Again, just do nothing + Ok(()) + } + + /// Prepare for monitored item creation, the node manager must take action to + /// sample data for each produced monitored item, according to the parameters. + /// Monitored item parameters have already been revised according to server limits, + /// but the node manager is allowed to further revise sampling interval. + /// + /// The node manager should also read the initial value of each monitored item, + /// and set the status code if monitored item creation failed. 
+ /// + /// The node manager is responsible for tracking the subscription no matter what + /// the value of monitoring_mode is, but should only sample if monitoring_mode + /// is not Disabled. + async fn create_monitored_items( + &self, + context: &RequestContext, + items: &mut [&mut CreateMonitoredItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Modify monitored items. This method is purely informative for the node manager, + /// to let it modify sampling intervals, apply a new filter, or similar. + /// + /// Node managers are not required to take any action here, and this method is not + /// allowed to fail. + async fn modify_monitored_items( + &self, + context: &RequestContext, + items: &[&MonitoredItemUpdateRef], + ) { + } + + /// Modify monitored items. This method is purely informative for the node manager, + /// to let it pause or resume sampling. Note that this should _not_ delete context + /// stored from `create_monitored_items`, since it may be called again to resume sampling. + /// + /// The node manager should sample so long as monitoring mode is not `Disabled`, the difference + /// between `Reporting` and `Sampling` is handled by the server. + /// + /// Node managers are not required to take any action here, and this method is not + /// allowed to fail. + async fn set_monitoring_mode( + &self, + context: &RequestContext, + mode: MonitoringMode, + items: &[&MonitoredItemRef], + ) { + } + + /// Delete monitored items. This method is purely informative for the node manager, + /// to let it stop sampling, or similar. + /// + /// Node managers are not required to take any action here, and this method is not + /// allowed to fail. Most node managers that implement subscriptions will want to do + /// something with this. + /// + /// This method may be given monitored items that were never created, or were + /// created for a different node manager. 
Attempting to delete a monitored item + /// that does not exist is handled elsewhere and should be a no-op here. + async fn delete_monitored_items(&self, context: &RequestContext, items: &[&MonitoredItemRef]) {} + + /// Perform a query on the address space. + /// + /// All node managers must be able to query in order for the + /// server to support querying. + /// + /// The node manager should set a continuation point if it reaches + /// limits, but is responsible for not exceeding max_data_sets_to_return + /// and max_references_to_return. + async fn query( + &self, + context: &RequestContext, + request: &mut QueryRequest, + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Call a list of methods. + /// + /// The node manager should validate the method arguments and set + /// an output error if the arguments are invalid. + /// + /// The node manager _must_ ensure that argument output lists and + /// method output lists are of the correct length according to the + /// method definition. + async fn call( + &self, + context: &RequestContext, + methods_to_call: &mut [&mut MethodCall], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Add a list of nodes. + /// + /// This should create the nodes, or set a failed status as appropriate. + /// If a node was created, the status should be set to Good. + async fn add_nodes( + &self, + context: &RequestContext, + nodes_to_add: &mut [&mut AddNodeItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Add a list of references. + /// + /// This will be given all references where the source _or_ + /// target belongs to this node manager. A reference is + /// considered successfully added if either source_status + /// or target_status are Good. + /// + /// If you want to explicitly set the reference to failed, + /// set both source and target status. 
Note that it may + /// already have been added in a different node manager, you are + /// responsible for any cleanup if you do this. + async fn add_references( + &self, + context: &RequestContext, + references_to_add: &mut [&mut AddReferenceItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Delete a list of nodes. + /// + /// This will be given all nodes that belong to this node manager. + /// + /// Typically, you also want to implement `delete_node_references` if + /// there are other node managers that support deletes. + async fn delete_nodes( + &self, + context: &RequestContext, + nodes_to_delete: &mut [&mut DeleteNodeItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } + + /// Delete references for the given list of nodes. + /// The node manager should respect `delete_target_references`. + /// + /// This is not allowed to fail, you should make it impossible to delete + /// nodes with immutable references. + async fn delete_node_references( + &self, + context: &RequestContext, + to_delete: &[&DeleteNodeItem], + ) { + } + + /// Delete a list of references. + /// + /// This will be given all references where the source _or_ + /// target belongs to this node manager. A reference is + /// considered successfully deleted if either source_status + /// or target_status are Good. + /// + /// If you want to explicitly set the reference to failed, + /// set both source and target status. Note that it may + /// already have been deleted in a different node manager, you are + /// responsible for any cleanup if you do this. 
+ async fn delete_references( + &self, + context: &RequestContext, + references_to_delete: &mut [&mut DeleteReferenceItem], + ) -> Result<(), StatusCode> { + Err(StatusCode::BadServiceUnsupported) + } +} diff --git a/lib/src/server/node_manager/monitored_items.rs b/lib/src/server/node_manager/monitored_items.rs new file mode 100644 index 000000000..04da54c0b --- /dev/null +++ b/lib/src/server/node_manager/monitored_items.rs @@ -0,0 +1,95 @@ +use crate::{ + server::MonitoredItemHandle, + types::{AttributeId, MonitoredItemModifyResult, NodeId, StatusCode}, +}; + +#[derive(Debug, Clone)] +/// Reference to a monitored item in the server subscription cache. +pub struct MonitoredItemRef { + handle: MonitoredItemHandle, + node_id: NodeId, + attribute: AttributeId, +} + +impl<'a> MonitoredItemRef { + pub(crate) fn new( + handle: MonitoredItemHandle, + node_id: NodeId, + attribute: AttributeId, + ) -> Self { + Self { + handle, + node_id, + attribute, + } + } + + /// Monitored item handle, uniquely identifies a monitored item. + pub fn handle(&self) -> MonitoredItemHandle { + self.handle + } + + /// Node ID of the monitored item. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + /// Attribute ID of the monitored item. + pub fn attribute(&self) -> AttributeId { + self.attribute + } +} + +#[derive(Debug, Clone)] +/// Reference to a monitored item with information from an update operation. +pub struct MonitoredItemUpdateRef { + handle: MonitoredItemHandle, + node_id: NodeId, + attribute: AttributeId, + update: MonitoredItemModifyResult, +} + +impl<'a> MonitoredItemUpdateRef { + pub(crate) fn new( + handle: MonitoredItemHandle, + node_id: NodeId, + attribute: AttributeId, + update: MonitoredItemModifyResult, + ) -> Self { + Self { + handle, + node_id, + attribute, + update, + } + } + + /// Monitored item handle, uniquely identifies a monitored item. + pub fn handle(&self) -> MonitoredItemHandle { + self.handle + } + + /// Node ID of the monitored item. 
+ pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + /// Attribute ID of the monitored item. + pub fn attribute(&self) -> AttributeId { + self.attribute + } + + /// Result of the monitored item update. + pub fn update(&self) -> &MonitoredItemModifyResult { + &self.update + } + + /// Status code of the update. + pub fn status_code(&self) -> StatusCode { + self.update.status_code + } + + pub(crate) fn into_result(self) -> MonitoredItemModifyResult { + self.update + } +} diff --git a/lib/src/server/node_manager/node_management.rs b/lib/src/server/node_manager/node_management.rs new file mode 100644 index 000000000..5f567faa6 --- /dev/null +++ b/lib/src/server/node_manager/node_management.rs @@ -0,0 +1,371 @@ +use crate::types::{ + AddNodeAttributes, AddNodesItem, AddNodesResult, AddReferencesItem, DecodingOptions, + DeleteNodesItem, DeleteReferencesItem, ExpandedNodeId, NodeClass, NodeId, QualifiedName, + StatusCode, +}; + +#[derive(Debug, Clone)] +/// Container for a single node being added in an `AddNode` service call. 
+pub struct AddNodeItem { + parent_node_id: ExpandedNodeId, + reference_type_id: NodeId, + requested_new_node_id: NodeId, + browse_name: QualifiedName, + node_class: NodeClass, + node_attributes: AddNodeAttributes, + type_definition_id: ExpandedNodeId, + + result_node_id: NodeId, + status: StatusCode, +} + +impl AddNodeItem { + pub(crate) fn new(item: AddNodesItem, options: &DecodingOptions) -> Self { + let mut status = StatusCode::BadNotSupported; + let attributes = + match AddNodeAttributes::from_extension_object(item.node_attributes, options) { + Ok(attr) => attr, + Err(e) => { + status = e; + AddNodeAttributes::None + } + }; + if item.requested_new_node_id.server_index != 0 { + status = StatusCode::BadNodeIdRejected; + } + + Self::validate_attributes(item.node_class, &attributes, &mut status); + + if item.reference_type_id.is_null() { + status = StatusCode::BadReferenceTypeIdInvalid; + } + if item.parent_node_id.is_null() { + status = StatusCode::BadParentNodeIdInvalid; + } + + match (item.node_class, item.type_definition.is_null()) { + (NodeClass::Object | NodeClass::Variable, true) => { + status = StatusCode::BadTypeDefinitionInvalid + } + (NodeClass::Object | NodeClass::Variable, false) => (), + (_, false) => status = StatusCode::BadTypeDefinitionInvalid, + _ => (), + } + + Self { + parent_node_id: item.parent_node_id, + reference_type_id: item.reference_type_id, + requested_new_node_id: item.requested_new_node_id.node_id, + browse_name: item.browse_name, + node_class: item.node_class, + node_attributes: attributes, + type_definition_id: item.type_definition, + result_node_id: NodeId::null(), + status, + } + } + + fn validate_attributes( + node_class: NodeClass, + attributes: &AddNodeAttributes, + status: &mut StatusCode, + ) { + match (node_class, attributes) { + (NodeClass::Object, AddNodeAttributes::Object(_)) + | (NodeClass::Variable, AddNodeAttributes::Variable(_)) + | (NodeClass::Method, AddNodeAttributes::Method(_)) + | (NodeClass::ObjectType, 
AddNodeAttributes::ObjectType(_)) + | (NodeClass::VariableType, AddNodeAttributes::VariableType(_)) + | (NodeClass::ReferenceType, AddNodeAttributes::ReferenceType(_)) + | (NodeClass::DataType, AddNodeAttributes::DataType(_)) + | (NodeClass::View, AddNodeAttributes::View(_)) => {} + (NodeClass::Unspecified, _) => *status = StatusCode::BadNodeClassInvalid, + (_, AddNodeAttributes::None | AddNodeAttributes::Generic(_)) => {} + _ => *status = StatusCode::BadNodeAttributesInvalid, + } + } + + /// Set the result of the operation. `node_id` is the node ID of the created node. + pub fn set_result(&mut self, node_id: NodeId, status: StatusCode) { + self.result_node_id = node_id; + self.status = status; + } + + /// The requested parent node ID. + pub fn parent_node_id(&self) -> &ExpandedNodeId { + &self.parent_node_id + } + + /// The requested reference type ID. + pub fn reference_type_id(&self) -> &NodeId { + &self.reference_type_id + } + + /// The requested new node ID. May be null, in which case the node manager picks the new + /// node ID. + pub fn requested_new_node_id(&self) -> &NodeId { + &self.requested_new_node_id + } + + /// Requested browse name of the new node. + pub fn browse_name(&self) -> &QualifiedName { + &self.browse_name + } + + /// Requested node class of the new node. + pub fn node_class(&self) -> NodeClass { + self.node_class + } + + /// Collection of requested attributes for the new node. + pub fn node_attributes(&self) -> &AddNodeAttributes { + &self.node_attributes + } + + /// Requested type definition ID. + pub fn type_definition_id(&self) -> &ExpandedNodeId { + &self.type_definition_id + } + + /// Current result status code. + pub fn status(&self) -> StatusCode { + self.status + } + + pub(crate) fn into_result(self) -> AddNodesResult { + AddNodesResult { + status_code: self.status, + added_node_id: self.result_node_id, + } + } +} + +#[derive(Debug, Clone)] +/// Container for a single reference being added in an `AddReferences` service call. 
+pub struct AddReferenceItem { + source_node_id: NodeId, + reference_type_id: NodeId, + target_node_id: ExpandedNodeId, + is_forward: bool, + + source_status: StatusCode, + target_status: StatusCode, +} + +impl AddReferenceItem { + pub(crate) fn new(item: AddReferencesItem) -> Self { + let mut status = StatusCode::BadNotSupported; + if item.source_node_id.is_null() { + status = StatusCode::BadSourceNodeIdInvalid; + } + if item.target_node_id.is_null() { + status = StatusCode::BadTargetNodeIdInvalid; + } + if item.reference_type_id.is_null() { + status = StatusCode::BadReferenceTypeIdInvalid; + } + if !item.target_server_uri.is_null() || item.target_node_id.server_index != 0 { + status = StatusCode::BadReferenceLocalOnly; + } + Self { + source_node_id: item.source_node_id, + reference_type_id: item.reference_type_id, + target_node_id: item.target_node_id, + is_forward: item.is_forward, + source_status: status, + target_status: status, + } + } + + /// Requested source node ID. + pub fn source_node_id(&self) -> &NodeId { + &self.source_node_id + } + + /// Requested reference type ID. + pub fn reference_type_id(&self) -> &NodeId { + &self.reference_type_id + } + + /// Requested target node ID. + pub fn target_node_id(&self) -> &ExpandedNodeId { + &self.target_node_id + } + + /// Current result status, as a summary of source status and target status. + pub(crate) fn result_status(&self) -> StatusCode { + if self.source_status.is_good() { + return self.source_status; + } + if self.target_status.is_good() { + return self.target_status; + } + self.source_status + } + + /// Set the result of this operation for the _source_ end of the reference. + pub fn set_source_result(&mut self, status: StatusCode) { + self.source_status = status; + } + + /// Set the result of this operation for the _target_ end of the reference. + pub fn set_target_result(&mut self, status: StatusCode) { + self.target_status = status; + } + + /// Requested reference direction. 
+ pub fn is_forward(&self) -> bool { + self.is_forward + } + + /// Current target status. + pub fn target_status(&self) -> StatusCode { + self.target_status + } + + /// Current source status. + pub fn source_status(&self) -> StatusCode { + self.source_status + } +} + +#[derive(Debug)] +/// Container for a single item in a `DeleteNodes` service call. +pub struct DeleteNodeItem { + node_id: NodeId, + delete_target_references: bool, + + status: StatusCode, +} + +impl DeleteNodeItem { + pub(crate) fn new(item: DeleteNodesItem) -> Self { + let mut status = StatusCode::BadNodeIdUnknown; + if item.node_id.is_null() { + status = StatusCode::BadNodeIdInvalid; + } + + Self { + node_id: item.node_id, + delete_target_references: item.delete_target_references, + status, + } + } + + /// Current status of the operation. + pub fn status(&self) -> StatusCode { + self.status + } + + /// Set the result of the node deletion operation. + pub fn set_result(&mut self, status: StatusCode) { + self.status = status; + } + + /// Whether the request should delete references that point to this node or not. + pub fn delete_target_references(&self) -> bool { + self.delete_target_references + } + + /// Node ID to delete. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } +} + +#[derive(Debug)] +/// Container for a single reference being deleted in an `DeleteReferences` service call. 
+pub struct DeleteReferenceItem { + source_node_id: NodeId, + reference_type_id: NodeId, + is_forward: bool, + target_node_id: ExpandedNodeId, + delete_bidirectional: bool, + + source_status: StatusCode, + target_status: StatusCode, +} + +impl DeleteReferenceItem { + pub(crate) fn new(item: DeleteReferencesItem) -> Self { + let mut status = StatusCode::BadNotSupported; + if item.source_node_id.is_null() { + status = StatusCode::BadSourceNodeIdInvalid; + } + if item.target_node_id.is_null() { + status = StatusCode::BadTargetNodeIdInvalid; + } + if item.reference_type_id.is_null() { + status = StatusCode::BadReferenceTypeIdInvalid; + } + if item.target_node_id.server_index != 0 { + status = StatusCode::BadReferenceLocalOnly; + } + + Self { + source_node_id: item.source_node_id, + reference_type_id: item.reference_type_id, + is_forward: item.is_forward, + target_node_id: item.target_node_id, + delete_bidirectional: item.delete_bidirectional, + + source_status: status, + target_status: status, + } + } + + /// Source node ID of the reference being deleted. + pub fn source_node_id(&self) -> &NodeId { + &self.source_node_id + } + + /// Reference type ID of the reference being deleted. + pub fn reference_type_id(&self) -> &NodeId { + &self.reference_type_id + } + + /// Target node ID of the reference being deleted. + pub fn target_node_id(&self) -> &ExpandedNodeId { + &self.target_node_id + } + + pub(crate) fn result_status(&self) -> StatusCode { + if self.source_status.is_good() { + return self.source_status; + } + if self.target_status.is_good() { + return self.target_status; + } + self.source_status + } + + /// Set the result of this operation for the _source_ end of the reference. + pub fn set_source_result(&mut self, status: StatusCode) { + self.source_status = status; + } + + /// Set the result of this operation for the _target_ end of the reference. 
+ pub fn set_target_result(&mut self, status: StatusCode) { + self.target_status = status; + } + + /// Direction of the reference being deleted. + pub fn is_forward(&self) -> bool { + self.is_forward + } + + /// Current target status. + pub fn target_status(&self) -> StatusCode { + self.target_status + } + + /// Current source status. + pub fn source_status(&self) -> StatusCode { + self.source_status + } + + /// Whether to delete the reference in both directions. + pub fn delete_bidirectional(&self) -> bool { + self.delete_bidirectional + } +} diff --git a/lib/src/server/node_manager/query.rs b/lib/src/server/node_manager/query.rs new file mode 100644 index 000000000..4d05623a7 --- /dev/null +++ b/lib/src/server/node_manager/query.rs @@ -0,0 +1,282 @@ +use crate::{ + crypto::random, + server::{ + session::{ + continuation_points::{ContinuationPoint, EmptyContinuationPoint}, + instance::Session, + }, + ParsedContentFilter, + }, + types::{ + AttributeId, ByteString, ExpandedNodeId, NodeTypeDescription, NumericRange, ParsingResult, + QueryDataDescription, QueryDataSet, RelativePath, StatusCode, + }, +}; + +pub(crate) struct QueryContinuationPoint { + pub node_manager_index: usize, + pub continuation_point: ContinuationPoint, + pub id: ByteString, + + node_types: Vec, + filter: ParsedContentFilter, + max_data_sets_to_return: usize, + max_references_to_return: usize, +} + +#[derive(Debug)] +/// Parsed and validated version of the OPC-UA `QueryDataDescription`. +pub struct ParsedQueryDataDescription { + /// The relative path to the node being referenced. + pub relative_path: RelativePath, + /// Attribute ID of the attribute being referenced. + pub attribute_id: AttributeId, + /// Index range for the read. 
+ pub index_range: NumericRange, +} + +impl ParsedQueryDataDescription { + pub(crate) fn parse(desc: QueryDataDescription) -> Result { + let attribute_id = AttributeId::from_u32(desc.attribute_id) + .map_err(|_| StatusCode::BadAttributeIdInvalid)?; + let index_range = desc + .index_range + .as_ref() + .parse::() + .map_err(|_| StatusCode::BadIndexRangeInvalid)?; + + Ok(Self { + relative_path: desc.relative_path, + attribute_id, + index_range, + }) + } +} + +#[derive(Debug)] +/// Parsed and validated version of the OPC-UA `NodeTypeDescription`. +pub struct ParsedNodeTypeDescription { + /// Type definition to query. + pub type_definition_node: ExpandedNodeId, + /// Whether to include sub types of the type definition. + pub include_sub_types: bool, + /// List of values to return. + pub data_to_return: Vec, +} + +impl ParsedNodeTypeDescription { + pub(crate) fn parse(desc: NodeTypeDescription) -> (ParsingResult, Result) { + let num_descs = desc + .data_to_return + .as_ref() + .map(|d| d.len()) + .unwrap_or_default(); + let mut desc_results = Vec::with_capacity(num_descs); + let mut final_descs = Vec::with_capacity(num_descs); + for child in desc.data_to_return.into_iter().flatten() { + match ParsedQueryDataDescription::parse(child) { + Ok(c) => { + desc_results.push(StatusCode::Good); + final_descs.push(c); + } + Err(e) => desc_results.push(e), + } + } + + if final_descs.len() < num_descs { + return ( + ParsingResult { + status_code: StatusCode::BadInvalidArgument, + data_status_codes: Some(desc_results), + data_diagnostic_infos: None, + }, + Err(StatusCode::BadInvalidArgument), + ); + } + + ( + ParsingResult { + status_code: StatusCode::Good, + data_diagnostic_infos: None, + data_status_codes: None, + }, + Ok(ParsedNodeTypeDescription { + type_definition_node: desc.type_definition_node, + include_sub_types: desc.include_sub_types, + data_to_return: final_descs, + }), + ) + } +} + +/// Container for a `Query` service call. 
+pub struct QueryRequest { + node_types: Vec, + filter: ParsedContentFilter, + max_data_sets_to_return: usize, + max_references_to_return: usize, + continuation_point: Option, + next_continuation_point: Option, + status: StatusCode, + node_manager_index: usize, + + data_sets: Vec, +} + +impl QueryRequest { + pub(crate) fn new( + node_types: Vec, + filter: ParsedContentFilter, + max_data_sets_to_return: usize, + max_references_to_return: usize, + ) -> Self { + Self { + node_types, + filter, + max_data_sets_to_return, + max_references_to_return, + continuation_point: None, + next_continuation_point: None, + data_sets: Vec::new(), + status: StatusCode::Good, + node_manager_index: 0, + } + } + + pub(crate) fn from_continuation_point(point: QueryContinuationPoint) -> Self { + Self { + node_types: point.node_types, + filter: point.filter, + max_data_sets_to_return: point.max_data_sets_to_return, + max_references_to_return: point.max_references_to_return, + continuation_point: Some(point.continuation_point), + next_continuation_point: None, + status: StatusCode::Good, + data_sets: Vec::new(), + node_manager_index: point.node_manager_index, + } + } + + /// Data sets to query. + pub fn data_sets(&self) -> &[QueryDataSet] { + &self.data_sets + } + + /// Continuation point, if present. + pub fn continuation_point(&self) -> Option<&ContinuationPoint> { + self.continuation_point.as_ref() + } + + /// Maximum number of references to return. + pub fn max_references_to_return(&self) -> usize { + self.max_references_to_return + } + + /// Maximum number of data sets to return. + pub fn max_data_sets_to_return(&self) -> usize { + self.max_data_sets_to_return + } + + /// Content filter that the results must match. + pub fn filter(&self) -> &ParsedContentFilter { + &self.filter + } + + /// Node types to query. + pub fn node_types(&self) -> &[ParsedNodeTypeDescription] { + &self.node_types + } + + /// Space for data sets left. 
+ pub fn remaining_data_sets(&self) -> usize { + if self.data_sets.len() >= self.max_data_sets_to_return { + 0 + } else { + self.max_data_sets_to_return - self.data_sets.len() + } + } + + /// Whether this query is completed. + pub fn is_completed(&self) -> bool { + self.remaining_data_sets() == 0 || self.next_continuation_point.is_some() + } + + pub(crate) fn into_result( + self, + node_manager_index: usize, + node_manager_count: usize, + session: &mut Session, + ) -> (Vec, ByteString, StatusCode) { + // If the status is bad, assume the results are suspect and return nothing. + if self.status.is_bad() { + return (Vec::new(), ByteString::null(), self.status); + } + // There may be a continuation point defined for the current node manager, + // in that case return that. There is also a corner case here where + // remaining == 0 and there is no continuation point. + // In this case we need to pass an empty continuation point + // to the next node manager. + let inner = self + .next_continuation_point + .map(|c| (c, node_manager_index)) + .or_else(|| { + if node_manager_index < node_manager_count - 1 { + Some(( + ContinuationPoint::new(Box::new(EmptyContinuationPoint)), + node_manager_index + 1, + )) + } else { + None + } + }); + + let continuation_point = inner.map(|(p, node_manager_index)| QueryContinuationPoint { + node_manager_index, + continuation_point: p, + id: random::byte_string(6), + node_types: self.node_types, + filter: self.filter, + max_data_sets_to_return: self.max_data_sets_to_return, + max_references_to_return: self.max_references_to_return, + }); + + let mut status = self.status; + let mut cp_id = continuation_point + .as_ref() + .map(|c| c.id.clone()) + .unwrap_or_default(); + + // If we're out of continuation points, the correct response is to not store it, and + // set the status code to BadNoContinuationPoints. 
+ if let Some(c) = continuation_point { + if session.add_query_continuation_point(&cp_id, c).is_err() { + status = StatusCode::BadNoContinuationPoints; + cp_id = ByteString::null(); + } + } + + (self.data_sets, cp_id, status) + } + + /// Current result status code. + pub fn status(&self) -> StatusCode { + self.status + } + + /// Set the general result of this query. + pub fn set_status(&mut self, status: StatusCode) { + self.status = status; + } + + /// Set the next continuation point for this query. + pub fn set_next_continuation_point( + &mut self, + next_continuation_point: Option, + ) { + self.next_continuation_point = next_continuation_point; + } + + pub(crate) fn node_manager_index(&self) -> usize { + self.node_manager_index + } +} diff --git a/lib/src/server/node_manager/type_tree.rs b/lib/src/server/node_manager/type_tree.rs new file mode 100644 index 000000000..e76e8b45d --- /dev/null +++ b/lib/src/server/node_manager/type_tree.rs @@ -0,0 +1,228 @@ +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, +}; + +use crate::types::{ + DataTypeId, NodeClass, NodeId, ObjectTypeId, QualifiedName, ReferenceTypeId, VariableTypeId, +}; + +use super::build::NamespaceMap; + +#[derive(PartialEq, Eq, Hash)] +struct TypePropertyKey { + path: Vec, +} +// NOTE: This implementation means that TypePropertyKey must have the same +// hash as an equivalent &[QualifiedName] +impl Borrow<[QualifiedName]> for TypePropertyKey { + fn borrow(&self) -> &[QualifiedName] { + &self.path + } +} + +#[derive(Clone, Debug)] +/// A single property of a type in the type tree. +pub struct TypeProperty { + /// Node ID of the property. + pub node_id: NodeId, + /// Node class of the property. + pub node_class: NodeClass, +} + +#[derive(Clone, Debug)] +/// Inverse reference to a type from a property. +pub struct TypePropertyInverseRef { + /// Node ID of the type. + pub type_id: NodeId, + /// Path to the property. + pub path: Vec, +} + +/// Type managing the types in an OPC-UA server. 
+/// The server needs to know about all available types, to handle things like +/// event filters, browse filtering, etc. +/// +/// Each node manager is responsible for populating the type tree with +/// its types. +pub struct TypeTree { + nodes: HashMap, + subtypes_by_source: HashMap>, + subtypes_by_target: HashMap, + property_to_type: HashMap, + type_properties: HashMap>, + namespaces: NamespaceMap, +} + +#[derive(Clone, Debug)] +/// A node in the type tree. +pub enum TypeTreeNode<'a> { + /// A registered type. + Type(NodeClass), + /// A property of a type. + Property(&'a TypePropertyInverseRef), +} + +impl TypeTree { + /// Return `true` if `child` is a subtype of `ancestor`, or if `child` and + /// `ancestor` is the same node, i.e. subtype in the OPC-UA sense. + pub fn is_subtype_of(&self, child: &NodeId, ancestor: &NodeId) -> bool { + let mut node = child; + loop { + if node == ancestor { + break true; + } + + let Some(class) = self.nodes.get(node) else { + break false; + }; + + if !matches!( + class, + NodeClass::DataType + | NodeClass::ObjectType + | NodeClass::ReferenceType + | NodeClass::VariableType + ) { + break false; + } + + match self.subtypes_by_target.get(node) { + Some(n) => node = n, + None => break false, + } + } + } + + /// Get a reference to a node in the type tree. + pub fn get_node<'a>(&'a self, node: &NodeId) -> Option> { + if let Some(n) = self.nodes.get(node) { + return Some(TypeTreeNode::Type(*n)); + } + if let Some(p) = self.property_to_type.get(node) { + return Some(TypeTreeNode::Property(p)); + } + None + } + + /// Get a type from the type tree. + pub fn get(&self, node: &NodeId) -> Option { + self.nodes.get(node).cloned() + } + + /// Create a new type tree with just the root nodes added. 
+ pub fn new() -> Self { + let mut type_tree = Self { + nodes: HashMap::new(), + subtypes_by_source: HashMap::new(), + subtypes_by_target: HashMap::new(), + type_properties: HashMap::new(), + property_to_type: HashMap::new(), + namespaces: NamespaceMap::new(), + }; + type_tree + .namespaces + .add_namespace("http://opcfoundation.org/UA/"); + type_tree + .nodes + .insert(ObjectTypeId::BaseObjectType.into(), NodeClass::ObjectType); + type_tree + .nodes + .insert(ReferenceTypeId::References.into(), NodeClass::ReferenceType); + type_tree.nodes.insert( + VariableTypeId::BaseVariableType.into(), + NodeClass::VariableType, + ); + type_tree + .nodes + .insert(DataTypeId::BaseDataType.into(), NodeClass::DataType); + type_tree + } + + /// Add a new type to the type tree. + pub fn add_type_node(&mut self, id: &NodeId, parent: &NodeId, node_class: NodeClass) { + self.nodes.insert(id.clone(), node_class); + self.subtypes_by_source + .entry(parent.clone()) + .or_default() + .insert(id.clone()); + self.subtypes_by_target.insert(id.clone(), parent.clone()); + } + + /// Add a new property to the type tree. + pub fn add_type_property( + &mut self, + id: &NodeId, + typ: &NodeId, + path: &[&QualifiedName], + node_class: NodeClass, + ) { + let props = match self.type_properties.get_mut(typ) { + Some(x) => x, + None => self.type_properties.entry(typ.clone()).or_default(), + }; + + let path_owned: Vec<_> = path.iter().map(|n| (*n).to_owned()).collect(); + + props.insert( + TypePropertyKey { + path: path_owned.clone(), + }, + TypeProperty { + node_class, + node_id: id.clone(), + }, + ); + + self.property_to_type.insert( + id.clone(), + TypePropertyInverseRef { + type_id: typ.clone(), + path: path_owned, + }, + ); + } + + /// Find a property by browse and type ID. 
+ pub fn find_type_prop_by_browse_path( + &self, + type_id: &NodeId, + path: &[QualifiedName], + ) -> Option<&TypeProperty> { + self.type_properties.get(type_id).and_then(|p| p.get(path)) + } + + /// Remove a node from the type tree. + pub fn remove(&mut self, node_id: &NodeId) -> bool { + if self.nodes.remove(node_id).is_some() { + let props = self.type_properties.remove(node_id); + if let Some(props) = props { + for prop in props.values() { + self.property_to_type.remove(&prop.node_id); + } + } + if let Some(parent) = self.subtypes_by_target.remove(node_id) { + if let Some(types) = self.subtypes_by_source.get_mut(&parent) { + types.remove(node_id); + } + } + return true; + } + if let Some(prop) = self.property_to_type.remove(node_id) { + let props = self.type_properties.get_mut(&prop.type_id); + if let Some(props) = props { + props.remove(&prop.path as &[QualifiedName]); + } + return true; + } + false + } + + pub fn namespaces(&self) -> &NamespaceMap { + &self.namespaces + } + + pub fn namespaces_mut(&mut self) -> &mut NamespaceMap { + &mut self.namespaces + } +} diff --git a/lib/src/server/node_manager/utils/mod.rs b/lib/src/server/node_manager/utils/mod.rs new file mode 100644 index 000000000..b897fa6be --- /dev/null +++ b/lib/src/server/node_manager/utils/mod.rs @@ -0,0 +1,7 @@ +mod opaque_node_id; +mod operations; +mod sync_sampler; + +pub use opaque_node_id::*; +pub use operations::get_node_metadata; +pub use sync_sampler::SyncSampler; diff --git a/lib/src/server/node_manager/utils/opaque_node_id.rs b/lib/src/server/node_manager/utils/opaque_node_id.rs new file mode 100644 index 000000000..e185a09c8 --- /dev/null +++ b/lib/src/server/node_manager/utils/opaque_node_id.rs @@ -0,0 +1,20 @@ +//! Utils for working with opaque node IDs containing JSON data. 
+ +use crate::types::{ByteString, Identifier, NodeId}; +use serde::{de::DeserializeOwned, Serialize}; + +pub fn as_opaque_node_id(value: &T, namespace: u16) -> Option { + let v = serde_json::to_vec(&value).ok()?; + Some(NodeId { + namespace, + identifier: Identifier::ByteString(ByteString { value: Some(v) }), + }) +} + +pub fn from_opaque_node_id(id: &NodeId) -> Option { + let v = match &id.identifier { + Identifier::ByteString(b) => b.value.as_ref()?, + _ => return None, + }; + serde_json::from_slice(&v).ok() +} diff --git a/lib/src/server/node_manager/utils/operations.rs b/lib/src/server/node_manager/utils/operations.rs new file mode 100644 index 000000000..a5ae600da --- /dev/null +++ b/lib/src/server/node_manager/utils/operations.rs @@ -0,0 +1,28 @@ +use crate::{ + server::node_manager::{ + view::{ExternalReferenceRequest, NodeMetadata}, + NodeManagerCollection, RequestContext, + }, + types::{BrowseDescriptionResultMask, NodeId}, +}; + +pub async fn get_node_metadata( + context: &RequestContext, + node_managers: &impl NodeManagerCollection, + ids: &[NodeId], +) -> Vec> { + let mut reqs: Vec<_> = ids + .iter() + .map(|n| ExternalReferenceRequest::new(n, BrowseDescriptionResultMask::all())) + .collect(); + for mgr in node_managers.iter_node_managers() { + let mut owned: Vec<_> = reqs + .iter_mut() + .filter(|n| mgr.owns_node(n.node_id())) + .collect(); + + mgr.resolve_external_references(context, &mut owned).await; + } + + reqs.into_iter().map(|r| r.into_inner()).collect() +} diff --git a/lib/src/server/node_manager/utils/sync_sampler.rs b/lib/src/server/node_manager/utils/sync_sampler.rs new file mode 100644 index 000000000..488b4c317 --- /dev/null +++ b/lib/src/server/node_manager/utils/sync_sampler.rs @@ -0,0 +1,199 @@ +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; + +use tokio_util::sync::{CancellationToken, DropGuard}; + +use crate::{ + server::{MonitoredItemHandle, SubscriptionCache}, + sync::Mutex, + types::{AttributeId, 
DataValue, MonitoringMode, NodeId}, +}; + +struct ItemRef { + mode: MonitoringMode, + sampling_interval: Duration, +} + +struct SamplerItem { + sampler: Box Option + Send>, + sampling_interval: Duration, + last_sample: Instant, + enabled: bool, + items: HashMap, +} + +impl SamplerItem { + pub fn refresh_values(&mut self) { + let mut interval = Duration::MAX; + let mut enabled = false; + for item in self.items.values() { + if item.mode != MonitoringMode::Disabled { + if interval > item.sampling_interval { + interval = item.sampling_interval; + } + enabled = true; + } + } + self.sampling_interval = interval; + self.enabled = enabled; + if self.last_sample > (Instant::now() + self.sampling_interval) { + self.last_sample = Instant::now() + self.sampling_interval; + } + } +} + +/// Utility for periodically sampling a list of nodes/attributes. +/// When using this you should call `run` to start the sampler once you have access +/// to the server context. +pub struct SyncSampler { + samplers: Arc>>, + _guard: DropGuard, + token: CancellationToken, +} + +impl SyncSampler { + /// Create a new sync sampler. + pub fn new() -> Self { + let token = CancellationToken::new(); + Self { + samplers: Default::default(), + _guard: token.clone().drop_guard(), + token, + } + } + + /// Start the sampler. You should avoid calling this multiple times, typically + /// this is called in `build_nodes` or `init`. The sampler will automatically shut down + /// once it is dropped. + pub fn run(&self, interval: Duration, subscriptions: Arc) { + let token = self.token.clone(); + let samplers = self.samplers.clone(); + tokio::spawn(async move { + tokio::select! { + _ = Self::run_internal(samplers, interval, subscriptions) => {}, + _ = token.cancelled() => {} + } + }); + } + + /// Add a periodic sampler for a monitored item. + /// Note that if a sampler for the given nodeId/attributeId pair already exists, + /// no new sampler will be created. 
It is assumed that each nodeId/attributeId + /// pair has a single sampler function. + pub fn add_sampler( + &self, + node_id: NodeId, + attribute: AttributeId, + sampler: impl FnMut() -> Option + Send + 'static, + mode: MonitoringMode, + handle: MonitoredItemHandle, + sampling_interval: Duration, + ) { + let mut samplers = self.samplers.lock(); + let id = (node_id, attribute); + let sampler = samplers.entry(id).or_insert(SamplerItem { + sampler: Box::new(sampler), + sampling_interval, + last_sample: Instant::now(), + items: HashMap::new(), + enabled: false, + }); + sampler.items.insert( + handle, + ItemRef { + mode, + sampling_interval, + }, + ); + sampler.refresh_values(); + } + + /// Update the sample rate of a monitored item. + /// The smallest registered sampling interval for each nodeId/attributeId pair is + /// used. This is also bounded from below by the rate of the SyncSampler itself. + pub fn update_sampler( + &self, + node_id: &NodeId, + attribute: AttributeId, + handle: MonitoredItemHandle, + sampling_interval: Duration, + ) { + let mut samplers = self.samplers.lock(); + if let Some(sampler) = samplers.get_mut(&(node_id.clone(), attribute)) { + if let Some(item) = sampler.items.get_mut(&handle) { + item.sampling_interval = sampling_interval; + sampler.refresh_values(); + } + } + } + + /// Set the sampler mode for a node. + pub fn set_sampler_mode( + &self, + node_id: &NodeId, + attribute: AttributeId, + handle: MonitoredItemHandle, + mode: MonitoringMode, + ) { + let mut samplers = self.samplers.lock(); + if let Some(sampler) = samplers.get_mut(&(node_id.clone(), attribute)) { + if let Some(item) = sampler.items.get_mut(&handle) { + item.mode = mode; + sampler.refresh_values(); + } + } + } + + /// Remove a sampler. The actual sampler will only be fully removed once + /// all samplers for the attribute are gone. 
+ pub fn remove_sampler( + &self, + node_id: &NodeId, + attribute: AttributeId, + handle: MonitoredItemHandle, + ) { + let mut samplers = self.samplers.lock(); + let id = (node_id.clone(), attribute); + + let Some(sampler) = samplers.get_mut(&id) else { + return; + }; + sampler.items.remove(&handle); + if sampler.items.is_empty() { + samplers.remove(&id); + } + } + + async fn run_internal( + samplers: Arc>>, + interval: Duration, + subscriptions: Arc, + ) { + let mut tick = tokio::time::interval(interval); + tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + tick.tick().await; + let now = Instant::now(); + let mut samplers = samplers.lock(); + let values = samplers + .iter_mut() + .filter_map(|((node_id, attribute), sampler)| { + if !sampler.enabled { + return None; + } + if sampler.last_sample + sampler.sampling_interval > now { + return None; + } + let Some(value) = (sampler.sampler)() else { + return None; + }; + sampler.last_sample = now; + Some((value, node_id, *attribute)) + }); + subscriptions.notify_data_change(values); + } + } +} diff --git a/lib/src/server/node_manager/view.rs b/lib/src/server/node_manager/view.rs new file mode 100644 index 000000000..337513a27 --- /dev/null +++ b/lib/src/server/node_manager/view.rs @@ -0,0 +1,718 @@ +use std::collections::{HashMap, VecDeque}; + +use crate::{ + crypto::random, + server::{ + address_space::ReferenceDirection, + session::{ + continuation_points::{ContinuationPoint, EmptyContinuationPoint}, + instance::Session, + }, + }, + types::{ + BrowseDescription, BrowseDescriptionResultMask, BrowseDirection, BrowsePath, BrowseResult, + ByteString, ExpandedNodeId, LocalizedText, NodeClass, NodeClassMask, NodeId, QualifiedName, + ReferenceDescription, RelativePathElement, StatusCode, + }, +}; + +use super::type_tree::TypeTree; + +#[derive(Debug, Clone)] +/// Object describing a node with sufficient context to construct +/// a `ReferenceDescription`. 
+pub struct NodeMetadata { + /// Node ID of the node. + pub node_id: ExpandedNodeId, + /// Type definition of the node. + pub type_definition: ExpandedNodeId, + /// Browse name of the node. + pub browse_name: QualifiedName, + /// Display name of the node. + pub display_name: LocalizedText, + /// Node class of the node. + pub node_class: NodeClass, +} + +#[derive(Debug)] +/// Container for a request for the metadata of a single node. +pub struct ExternalReferenceRequest { + node_id: NodeId, + result_mask: BrowseDescriptionResultMask, + item: Option, +} + +impl ExternalReferenceRequest { + /// Create a new external reference request from the node ID of the node being requested + /// and a result mask. + pub fn new(reference: &NodeId, result_mask: BrowseDescriptionResultMask) -> Self { + Self { + node_id: reference.clone(), + result_mask, + item: None, + } + } + + /// Node ID of the node being requested. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + /// Set the result to a `NodeMetadata` object. + pub fn set(&mut self, reference: NodeMetadata) { + self.item = Some(reference); + } + + /// Get the mask for fields that should be included in the returned `NodeMetadata`. + pub fn result_mask(&self) -> BrowseDescriptionResultMask { + self.result_mask + } + + /// Consume this request and return the result. + pub fn into_inner(self) -> Option { + self.item + } +} + +#[derive(Debug)] +/// A reference pointing to some node in a different node manager. +pub struct ExternalReference { + target_id: ExpandedNodeId, + reference_type_id: NodeId, + direction: ReferenceDirection, +} + +impl ExternalReference { + /// Create a new external reference. + pub fn new( + target_id: ExpandedNodeId, + reference_type_id: NodeId, + direction: ReferenceDirection, + ) -> Self { + Self { + target_id, + reference_type_id, + direction, + } + } + + /// Create a reference description from this and a `NodeMetadata` object. 
+ pub fn into_reference(self, meta: NodeMetadata) -> ReferenceDescription { + ReferenceDescription { + reference_type_id: self.reference_type_id, + is_forward: matches!(self.direction, ReferenceDirection::Forward), + node_id: self.target_id, + browse_name: meta.browse_name, + display_name: meta.display_name, + node_class: meta.node_class, + type_definition: meta.type_definition, + } + } +} + +#[derive(Debug)] +/// Result of adding a reference to a browse node. +pub enum AddReferenceResult { + /// The reference was added + Added, + /// The reference does not match the filters and was rejected + Rejected, + /// The reference does match the filters, but the node is full. + Full(ReferenceDescription), +} + +/// Container for a node being browsed and the result of the browse operation. +pub struct BrowseNode { + node_id: NodeId, + browse_direction: BrowseDirection, + reference_type_id: NodeId, + include_subtypes: bool, + node_class_mask: NodeClassMask, + result_mask: BrowseDescriptionResultMask, + references: Vec, + status_code: StatusCode, + // It is feasible to only keep one continuation point, by using the + // fact that node managers are sequential. If the first node manager is done reading, + // we move on to the next. + // All we need to do is keep track of which node manager made the last continuation point. + input_continuation_point: Option, + next_continuation_point: Option, + max_references_per_node: usize, + input_index: usize, + pub(crate) start_node_manager: usize, + + /// List of references to nodes not owned by the node manager that generated the + /// reference. These are resolved after the initial browse, and any excess is stored + /// in a continuation point. 
+ external_references: Vec, +} + +pub(crate) struct BrowseContinuationPoint { + pub node_manager_index: usize, + pub continuation_point: ContinuationPoint, + pub id: ByteString, + + node_id: NodeId, + browse_direction: BrowseDirection, + reference_type_id: NodeId, + include_subtypes: bool, + node_class_mask: NodeClassMask, + result_mask: BrowseDescriptionResultMask, + pub(crate) max_references_per_node: usize, + + external_references: Vec, +} + +impl BrowseNode { + /// Create a new empty browse node + pub(crate) fn new( + description: BrowseDescription, + max_references_per_node: usize, + input_index: usize, + ) -> Self { + Self { + node_id: description.node_id, + browse_direction: description.browse_direction, + reference_type_id: description.reference_type_id, + include_subtypes: description.include_subtypes, + node_class_mask: NodeClassMask::from_bits_truncate(description.node_class_mask), + result_mask: BrowseDescriptionResultMask::from_bits_truncate(description.result_mask), + input_continuation_point: None, + next_continuation_point: None, + max_references_per_node, + references: Vec::new(), + status_code: StatusCode::BadNodeIdUnknown, + input_index, + start_node_manager: 0, + external_references: Vec::new(), + } + } + + pub(crate) fn from_continuation_point( + point: BrowseContinuationPoint, + input_index: usize, + ) -> Self { + Self { + node_id: point.node_id, + browse_direction: point.browse_direction, + reference_type_id: point.reference_type_id, + include_subtypes: point.include_subtypes, + node_class_mask: point.node_class_mask, + result_mask: point.result_mask, + references: Vec::new(), + status_code: StatusCode::BadNodeIdUnknown, + input_continuation_point: Some(point.continuation_point), + next_continuation_point: None, + max_references_per_node: point.max_references_per_node, + input_index, + start_node_manager: point.node_manager_index, + external_references: point.external_references, + } + } + + /// Set the response status, you should make sure 
to set this + /// if you own the node being browsed. It defaults to BadNodeIdUnknown. + pub fn set_status(&mut self, status: StatusCode) { + self.status_code = status; + } + + /// Get the continuation point created during the last request. + pub fn continuation_point(&self) -> Option<&T> { + self.input_continuation_point.as_ref().and_then(|c| c.get()) + } + + /// Get the continuation point created during the last request. + pub fn continuation_point_mut(&mut self) -> Option<&mut T> { + self.input_continuation_point + .as_mut() + .and_then(|c| c.get_mut()) + } + + /// Consume the continuation point created during the last request. + pub fn take_continuation_point(&mut self) -> Option> { + self.input_continuation_point.take().and_then(|c| c.take()) + } + + /// Set the continuation point that will be returned to the client. + pub fn set_next_continuation_point( + &mut self, + continuation_point: Box, + ) { + self.next_continuation_point = Some(ContinuationPoint::new(continuation_point)); + } + + /// Get the current number of added references. + pub fn result_len(&self) -> usize { + self.references.len() + } + + /// Get the number of references that can be added to this result before + /// stopping and returning a continuation point. + pub fn remaining(&self) -> usize { + if self.result_len() >= self.max_references_per_node { + 0 + } else { + self.max_references_per_node - self.result_len() + } + } + + /// Add a reference to the results list, without verifying that it is valid. + /// If you do this, you are responsible for validating filters, + /// and requested fields on each reference. + pub fn add_unchecked(&mut self, reference: ReferenceDescription) { + self.references.push(reference); + } + + /// Return `true` if nodes with the given reference type ID should be returned. 
+ pub fn allows_reference_type(&self, ty: &NodeId, type_tree: &TypeTree) -> bool { + if self.reference_type_id.is_null() { + return true; + } + + if !matches!( + type_tree.get(&self.reference_type_id), + Some(NodeClass::ReferenceType) + ) { + return false; + } + if self.include_subtypes { + if !type_tree.is_subtype_of(ty, &self.reference_type_id) { + return false; + } + } else { + if ty != &self.reference_type_id { + return false; + } + } + true + } + + /// Return `true` if nodes with the given node class should be returned. + pub fn allows_node_class(&self, node_class: NodeClass) -> bool { + if !self.node_class_mask.is_empty() + && !self + .node_class_mask + .contains(NodeClassMask::from_bits_truncate(node_class as u32)) + { + false + } else { + true + } + } + + /// Return `true` if the given reference should be returned. + pub fn matches_filter(&self, type_tree: &TypeTree, reference: &ReferenceDescription) -> bool { + if reference.node_id.is_null() { + warn!("Skipping reference with null NodeId"); + return false; + } + if matches!(reference.node_class, NodeClass::Unspecified) { + warn!( + "Skipping reference {} with unspecified node class and NodeId", + reference.node_id + ); + return false; + } + // Validate the reference and reference type + if !reference.reference_type_id.is_null() + && !matches!( + type_tree.get(&reference.reference_type_id), + Some(NodeClass::ReferenceType) + ) + { + warn!( + "Skipping reference {} with reference type that does not exist or is not a ReferenceType", + reference.node_id + ); + return false; + } + + if !self.allows_node_class(reference.node_class) { + return false; + } + + // Check the reference type filter. + self.allows_reference_type(&reference.reference_type_id, type_tree) + } + + /// Add a reference, validating that it matches the filters, and returning `Added` if it was added. + /// If the browse node is full, this will return `Full` containing the given reference if + /// `max_references_per_node` would be exceeded. 
In this case you are responsible for + /// setting a `ContinuationPoint` to ensure all references are included. + /// This will clear any fields not required by ResultMask. + pub fn add( + &mut self, + type_tree: &TypeTree, + mut reference: ReferenceDescription, + ) -> AddReferenceResult { + // First, validate that the reference is valid at all. + if !self.matches_filter(type_tree, &reference) { + return AddReferenceResult::Rejected; + } + + if !self + .result_mask + .contains(BrowseDescriptionResultMask::RESULT_MASK_BROWSE_NAME) + { + reference.browse_name = QualifiedName::null(); + } + + if !self + .result_mask + .contains(BrowseDescriptionResultMask::RESULT_MASK_DISPLAY_NAME) + { + reference.display_name = LocalizedText::null(); + } + + if !self + .result_mask + .contains(BrowseDescriptionResultMask::RESULT_MASK_NODE_CLASS) + { + reference.node_class = NodeClass::Unspecified; + } + + if !self + .result_mask + .contains(BrowseDescriptionResultMask::RESULT_MASK_REFERENCE_TYPE) + { + reference.reference_type_id = NodeId::null(); + } + + if !self + .result_mask + .contains(BrowseDescriptionResultMask::RESULT_MASK_TYPE_DEFINITION) + { + reference.type_definition = ExpandedNodeId::null(); + } + + if self.remaining() > 0 { + self.references.push(reference); + AddReferenceResult::Added + } else { + AddReferenceResult::Full(reference) + } + } + + /// Whether to include subtypes of the `reference_type_id`. + pub fn include_subtypes(&self) -> bool { + self.include_subtypes + } + + /// Node ID to browse. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + /// Direction to browse. + pub fn browse_direction(&self) -> BrowseDirection { + self.browse_direction + } + + /// Mask for node classes to return. If this is empty, all node classes should be returned. + pub fn node_class_mask(&self) -> &NodeClassMask { + &self.node_class_mask + } + + /// Mask for attributes to return. 
+ pub fn result_mask(&self) -> BrowseDescriptionResultMask { + self.result_mask + } + + /// Reference type ID of references to return. Subject to `include_subtypes`. + pub fn reference_type_id(&self) -> &NodeId { + &self.reference_type_id + } + + pub(crate) fn into_result( + self, + node_manager_index: usize, + node_manager_count: usize, + session: &mut Session, + ) -> (BrowseResult, usize) { + // There may be a continuation point defined for the current node manager, + // in that case return that. There is also a corner case here where + // remaining == 0 and there is no continuation point. + // In this case we need to pass an empty continuation point + // to the next node manager. + let inner = self + .next_continuation_point + .map(|c| (c, node_manager_index)) + .or_else(|| { + if node_manager_index < node_manager_count - 1 { + Some(( + ContinuationPoint::new(Box::new(EmptyContinuationPoint)), + node_manager_index + 1, + )) + } else { + None + } + }); + + let continuation_point = inner.map(|(p, node_manager_index)| BrowseContinuationPoint { + node_manager_index, + continuation_point: p, + id: random::byte_string(6), + node_id: self.node_id, + browse_direction: self.browse_direction, + reference_type_id: self.reference_type_id, + include_subtypes: self.include_subtypes, + node_class_mask: self.node_class_mask, + result_mask: self.result_mask, + max_references_per_node: self.max_references_per_node, + external_references: self.external_references, + }); + + let mut result = BrowseResult { + status_code: self.status_code, + continuation_point: continuation_point + .as_ref() + .map(|c| c.id.clone()) + .unwrap_or_default(), + references: Some(self.references), + }; + + // If we're out of continuation points, the correct response is to not store it, and + // set the status code to BadNoContinuationPoints. 
+ if let Some(c) = continuation_point { + if session.add_browse_continuation_point(c).is_err() { + result.status_code = StatusCode::BadNoContinuationPoints; + result.continuation_point = ByteString::null(); + } + } + + (result, self.input_index) + } + + /// Returns whether this node is completed in this invocation of the Browse or + /// BrowseNext service. If this returns true, no new nodes should be added. + pub fn is_completed(&self) -> bool { + self.remaining() <= 0 || self.next_continuation_point.is_some() + } + + /// Add an external reference to the result. This will be resolved by + /// calling into a different node manager. + pub fn push_external_reference(&mut self, reference: ExternalReference) { + self.external_references.push(reference); + } + + /// Get an iterator over the external references. + pub fn get_external_refs(&self) -> impl Iterator { + self.external_references + .iter() + .map(|n| &n.target_id.node_id) + } + + /// Return `true` if there are any external references to evaluate. + pub fn any_external_refs(&self) -> bool { + !self.external_references.is_empty() + } + + pub(crate) fn resolve_external_references( + &mut self, + type_tree: &TypeTree, + resolved_nodes: &HashMap<&NodeId, &NodeMetadata>, + ) { + let mut cont_point = ExternalReferencesContPoint { + items: VecDeque::new(), + }; + + let refs = std::mem::take(&mut self.external_references); + for rf in refs { + if let Some(meta) = resolved_nodes.get(&rf.target_id.node_id) { + let rf = rf.into_reference((*meta).clone()); + if !self.matches_filter(type_tree, &rf) { + continue; + } + if self.remaining() > 0 { + self.add_unchecked(rf); + } else { + cont_point.items.push_back(rf); + } + } + } + + if !cont_point.items.is_empty() { + self.set_next_continuation_point(Box::new(cont_point)); + } + } +} + +pub(crate) struct ExternalReferencesContPoint { + pub items: VecDeque, +} + +// The node manager model works somewhat poorly with translate browse paths. 
+// In theory a node manager should only need to know about references relating to its own nodes, +// but if a browse path crosses a boundary between node managers it isn't obvious +// how to handle that. +// If it becomes necessary there may be ways to handle this, but it may be we just leave it up +// to the user. + +#[derive(Debug, Clone)] +pub(crate) struct BrowsePathResultElement { + pub(crate) node: NodeId, + pub(crate) depth: usize, + pub(crate) unmatched_browse_name: Option, +} + +/// Container for a node being discovered in a browse path operation. +#[derive(Debug, Clone)] +pub struct BrowsePathItem<'a> { + pub(crate) node: NodeId, + input_index: usize, + depth: usize, + node_manager_index: usize, + iteration_number: usize, + path: &'a [RelativePathElement], + results: Vec, + status: StatusCode, + unmatched_browse_name: Option, +} + +impl<'a> BrowsePathItem<'a> { + pub(crate) fn new( + elem: BrowsePathResultElement, + input_index: usize, + root: &BrowsePathItem<'a>, + node_manager_index: usize, + iteration_number: usize, + ) -> Self { + Self { + node: elem.node, + input_index, + depth: elem.depth, + node_manager_index, + path: if elem.depth <= root.path.len() { + &root.path[elem.depth..] 
+ } else { + &[] + }, + results: Vec::new(), + status: StatusCode::Good, + iteration_number, + unmatched_browse_name: elem.unmatched_browse_name, + } + } + + pub(crate) fn new_root(path: &'a BrowsePath, input_index: usize) -> Self { + let mut status = StatusCode::Good; + let elements = path.relative_path.elements.as_ref(); + if elements.is_none() || elements.is_some_and(|e| e.is_empty()) { + status = StatusCode::BadNothingToDo; + } else if elements.unwrap().iter().any(|el| el.target_name.is_null()) { + status = StatusCode::BadBrowseNameInvalid; + } + + Self { + node: path.starting_node.clone(), + input_index, + depth: 0, + node_manager_index: usize::MAX, + path: if let Some(elements) = path.relative_path.elements.as_ref() { + &*elements + } else { + &[] + }, + results: Vec::new(), + status, + iteration_number: 0, + unmatched_browse_name: None, + } + } + + /// Full browse path for this item. + pub fn path(&self) -> &'a [RelativePathElement] { + self.path + } + + /// Root node ID to evaluate the path from. + pub fn node_id(&self) -> &NodeId { + &self.node + } + + /// Add a path result element. + pub fn add_element( + &mut self, + node: NodeId, + relative_depth: usize, + unmatched_browse_name: Option, + ) { + self.results.push(BrowsePathResultElement { + node, + depth: self.depth + relative_depth, + unmatched_browse_name: unmatched_browse_name, + }) + } + + /// Set the status code for this operation. + pub fn set_status(&mut self, status: StatusCode) { + self.status = status; + } + + pub(crate) fn results_mut(&mut self) -> &mut Vec { + &mut self.results + } + + pub(crate) fn input_index(&self) -> usize { + self.input_index + } + + pub(crate) fn node_manager_index(&self) -> usize { + self.node_manager_index + } + + /// Get the current result status code. + pub fn status(&self) -> StatusCode { + self.status + } + + /// Get the current interation number. 
+ pub fn iteration_number(&self) -> usize { + self.iteration_number + } + + /// Get the last unmatched browse name, if present. + pub fn unmatched_browse_name(&self) -> Option<&QualifiedName> { + self.unmatched_browse_name.as_ref() + } + + /// Set the browse name as matched by the node manager given by `node_manager_index`. + pub fn set_browse_name_matched(&mut self, node_manager_index: usize) { + self.unmatched_browse_name = None; + self.node_manager_index = node_manager_index; + } +} + +#[derive(Debug)] +/// Container for a single node in a `RegisterNodes` call. +pub struct RegisterNodeItem { + node_id: NodeId, + registered: bool, +} + +impl RegisterNodeItem { + pub(crate) fn new(node_id: NodeId) -> Self { + Self { + node_id, + registered: false, + } + } + + /// Node ID to register. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + /// Set the node registered status. This is returned to the client. + pub fn set_registered(&mut self, registered: bool) { + self.registered = registered; + } + + pub(crate) fn into_result(self) -> Option { + if self.registered { + Some(self.node_id) + } else { + None + } + } +} diff --git a/lib/src/server/server.iml b/lib/src/server/server.iml deleted file mode 100644 index 7fe828a3a..000000000 --- a/lib/src/server/server.iml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/lib/src/server/server.rs b/lib/src/server/server.rs index 4bb666000..38cddc8f1 100644 --- a/lib/src/server/server.rs +++ b/lib/src/server/server.rs @@ -1,126 +1,99 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides the [`Server`] type and functionality related to it. 
- -use std::{marker::Sync, net::SocketAddr, sync::Arc}; +use std::{ + collections::HashMap, + net::{SocketAddr, ToSocketAddrs}, + sync::{ + atomic::{AtomicU16, AtomicU8}, + Arc, + }, + time::Duration, +}; +use arc_swap::ArcSwap; +use futures::{future::Either, never::Never, stream::FuturesUnordered, FutureExt, StreamExt}; use tokio::{ - self, - net::{TcpListener, TcpStream, ToSocketAddrs}, - sync::oneshot::{self, Sender}, - time::{interval_at, Duration, Instant}, + net::TcpListener, + sync::Notify, + task::{JoinError, JoinHandle}, +}; +use tokio_util::sync::CancellationToken; + +use crate::{ + core::{config::Config, handle::AtomicHandle}, + crypto::CertificateStore, + server::{node_manager::ServerContext, session::controller::SessionController}, + sync::RwLock, + types::{DateTime, LocalizedText, ServerState, UAString}, }; -use crate::core::{config::Config, prelude::*}; -use crate::crypto::*; -use crate::sync::*; -use crate::types::service_types::ServerState as ServerStateType; - -use crate::server::{ - address_space::types::AddressSpace, - comms::tcp_transport::*, - comms::transport::Transport, +use super::{ + authenticator::DefaultAuthenticator, + builder::ServerBuilder, config::ServerConfig, - constants, - diagnostics::ServerDiagnostics, - events::audit::AuditLog, - metrics::ServerMetrics, - session::SessionManager, - state::{OperationalLimits, ServerState}, - util::PollingAction, + discovery::periodic_discovery_server_registration, + info::ServerInfo, + node_manager::{NodeManagers, NodeManagersRef, TypeTree}, + server_handle::ServerHandle, + session::{controller::ControllerCommand, manager::SessionManager}, + subscriptions::SubscriptionCache, + ServerCapabilities, }; -pub type Connections = Vec>>; - -/// A `Server` represents a running instance of an OPC UA server. There can be more than one `Server` -/// running at any given time providing they do not share the same ports. -/// -/// A `Server` is initialised from a [`ServerConfig`]. 
The `ServerConfig` sets what port the server -/// runs on, the endpoints it supports, the identity tokens it supports, identity tokens and so forth. -/// A single server can offer multiple endpoints with different security policies. A server can -/// also be configured to register itself with a discovery server. -/// -/// Once the `Server` is configured, it is run by calling [`run`] which consumes the `Server`. -/// Alternatively if you have reason to maintain access to the server object, -/// you may call the static function [`run_server`] providing the server wrapped as -/// `Arc>`. -/// -/// The server's [`AddressSpace`] is initialised with the default OPC UA node set, but may also -/// be extended with additional nodes representing folders, variables, methods etc. -/// -/// The server's [`CertificateStore`] manages the server's private key and public certificate. It -/// also manages public certificates of incoming clients and arranges them into trusted and rejected -/// collections. -/// -/// [`run`]: #method.run -/// [`run_server`]: #method.run_server -/// [`ServerConfig`]: ../config/struct.ServerConfig.html -/// [`AddressSpace`]: ../address_space/address_space/struct.AddressSpace.html -/// [`CertificateStore`]: ../../opcua_core/crypto/certificate_store/struct.CertificateStore.html -/// +struct ConnectionInfo { + command_send: tokio::sync::mpsc::Sender, +} + +/// The server struct. This is consumed when run, so you will typically not hold onto this for longer +/// periods of time. pub struct Server { - /// List of pending polling actions to add to the server once run is called - pending_polling_actions: Vec<(u64, Box)>, - /// Certificate store for certs + /// Certificate store certificate_store: Arc>, - /// Server metrics - diagnostics and anything else that someone might be interested in that - /// describes the current state of the server - server_metrics: Arc>, - /// The server state is everything that sessions share that can possibly change. 
State - /// is initialised from a [`ServerConfig`]. - server_state: Arc>, - /// Address space - address_space: Arc>, - /// List of open connections - connections: Arc>, /// Session manager session_manager: Arc>, -} - -impl From for Server { - fn from(config: ServerConfig) -> Server { - Server::new(config) - } + /// Open connections. + connections: FuturesUnordered>, + /// Map to metadata about each open connection + connection_map: HashMap, + /// Server configuration, fixed after the server is started + config: Arc, + /// Context for use by connections to access general server state. + info: Arc, + /// Subscription cache, global because subscriptions outlive sessions. + subscriptions: Arc, + /// List of node managers + node_managers: NodeManagers, + /// Cancellation token + token: CancellationToken, + /// Notify that is woken up if a new session is added to the session manager. + session_notify: Arc, } impl Server { - /// Creates a new [`Server`] instance, initialising it from a [`ServerConfig`]. 
- /// - /// [`Server`]: ./struct.Server.html - /// [`ServerConfig`]: ../config/struct.ServerConfig.html - pub fn new(mut config: ServerConfig) -> Server { - if !config.is_valid() { - panic!("Cannot create a server using an invalid configuration."); + pub(crate) fn new_from_builder(builder: ServerBuilder) -> Result<(Self, ServerHandle), String> { + if !builder.config.is_valid() { + return Err("Configuration is invalid".to_owned()); } - // Set from config + let mut config = builder.config; + let application_name = config.application_name.clone(); let application_uri = UAString::from(&config.application_uri); let product_uri = UAString::from(&config.product_uri); - let start_time = DateTime::now(); let servers = vec![config.application_uri.clone()]; - let base_endpoint = format!( + /* let base_endpoint = format!( "opc.tcp://{}:{}", config.tcp_config.host, config.tcp_config.port - ); - let max_subscriptions = config.limits.max_subscriptions; - let max_monitored_items_per_sub = config.limits.max_monitored_items_per_sub; - let max_monitored_item_queue_size = config.limits.max_monitored_item_queue_size; + ); */ - let diagnostics = Arc::new(RwLock::new(ServerDiagnostics::default())); - let min_publishing_interval_ms = config.limits.min_publishing_interval * 1000.0; - let min_sampling_interval_ms = config.limits.min_sampling_interval * 1000.0; + // let diagnostics = Arc::new(RwLock::new(ServerDiagnostics::default())); let send_buffer_size = config.limits.send_buffer_size; let receive_buffer_size = config.limits.receive_buffer_size; - // Security, pki auto create cert let application_description = if config.create_sample_keypair { Some(config.application_description()) } else { None }; + let (mut certificate_store, server_certificate, server_pkey) = CertificateStore::new_with_x509_data( &config.pki_dir, @@ -129,332 +102,299 @@ impl Server { config.private_key_path.as_deref(), application_description, ); + if server_certificate.is_none() || server_pkey.is_none() { - 
error!("Server is missing its application instance certificate and/or its private key. Encrypted endpoints will not function correctly.") + warn!("Server is missing its application instance certificate and/or its private key. Encrypted endpoints will not function correctly."); } - // Load thumbprints of every user token config.read_x509_thumbprints(); - // Servers may choose to auto trust clients to save some messing around with rejected certs. - // This is strongly not advised in production. if config.certificate_validation.trust_client_certs { info!("Server has chosen to auto trust client certificates. You do not want to do this in production code."); certificate_store.set_trust_unknown_certs(true); } certificate_store.set_check_time(config.certificate_validation.check_time); - let config = Arc::new(RwLock::new(config)); + let config = Arc::new(config); - // Set some values in the address space from the server state - let address_space = Arc::new(RwLock::new(AddressSpace::new())); + let service_level = Arc::new(AtomicU8::new(255)); - let audit_log = Arc::new(RwLock::new(AuditLog::new(address_space.clone()))); + let type_tree = Arc::new(RwLock::new(TypeTree::new())); - let server_state = ServerState { + let info = ServerInfo { + authenticator: builder + .authenticator + .unwrap_or_else(|| Arc::new(DefaultAuthenticator::new(config.user_tokens.clone()))), application_uri, product_uri, application_name: LocalizedText { locale: UAString::null(), text: UAString::from(application_name), }, + start_time: ArcSwap::new(Arc::new(crate::types::DateTime::now())), servers, - base_endpoint, - state: ServerStateType::Shutdown, - start_time, - config, + config: config.clone(), server_certificate, server_pkey, - last_subscription_id: 0, - max_subscriptions, - max_monitored_items_per_sub, - max_monitored_item_queue_size, - min_publishing_interval_ms, - min_sampling_interval_ms, - default_keep_alive_count: constants::DEFAULT_KEEP_ALIVE_COUNT, - max_keep_alive_count: 
constants::MAX_KEEP_ALIVE_COUNT, - max_lifetime_count: constants::MAX_KEEP_ALIVE_COUNT * 3, - diagnostics, - abort: false, - audit_log, - register_nodes_callback: None, - unregister_nodes_callback: None, - historical_data_provider: None, - historical_event_provider: None, - operational_limits: OperationalLimits::default(), + operational_limits: config.limits.operational.clone(), + state: ArcSwap::new(Arc::new(ServerState::Shutdown)), send_buffer_size, receive_buffer_size, + type_tree: type_tree.clone(), + subscription_id_handle: AtomicHandle::new(1), + monitored_item_id_handle: AtomicHandle::new(1), + secure_channel_id_handle: Arc::new(AtomicHandle::new(1)), + capabilities: ServerCapabilities::default(), + service_level: service_level.clone(), + port: AtomicU16::new(0), }; - let server_state = Arc::new(RwLock::new(server_state)); - { - let mut address_space = trace_write_lock!(address_space); - address_space.set_server_state(server_state.clone()); - } - - // Server metrics - let server_metrics = Arc::new(RwLock::new(ServerMetrics::new())); - - // Cert store let certificate_store = Arc::new(RwLock::new(certificate_store)); - let server = Server { - pending_polling_actions: Vec::new(), - server_state, - server_metrics: server_metrics.clone(), - address_space, - certificate_store, - connections: Arc::new(RwLock::new(Vec::new())), - session_manager: Arc::new(RwLock::new(SessionManager::default())), - }; + let info = Arc::new(info); + let subscriptions = Arc::new(SubscriptionCache::new(config.limits.subscriptions)); - let mut server_metrics = trace_write_lock!(server_metrics); - server_metrics.set_server_info(&server); + let node_managers_ref = NodeManagersRef::new_empty(); + let context = ServerContext { + node_managers: node_managers_ref.clone(), + subscriptions: subscriptions.clone(), + info: info.clone(), + authenticator: info.authenticator.clone(), + type_tree: type_tree.clone(), + }; - server - } + let mut final_node_managers = Vec::new(); + for nm_builder in 
builder.node_managers { + final_node_managers.push(nm_builder.build(context.clone())); + } - /// Runs the server and blocks until it completes either by aborting or by error. Typically - /// a server should be run on its own thread. - /// - /// Calling this function consumes the server. - pub fn run(self) { - let server = Arc::new(RwLock::new(self)); - Self::run_server(server); + let node_managers = NodeManagers::new(final_node_managers); + node_managers_ref.init_from_node_managers(node_managers.clone()); + + let session_notify = Arc::new(Notify::new()); + let session_manager = Arc::new(RwLock::new(SessionManager::new( + info.clone(), + session_notify.clone(), + ))); + + let handle = ServerHandle::new( + info.clone(), + service_level, + subscriptions.clone(), + node_managers.clone(), + session_manager.clone(), + type_tree.clone(), + builder.token.clone(), + ); + Ok(( + Self { + certificate_store, + session_manager, + connections: FuturesUnordered::new(), + connection_map: HashMap::new(), + subscriptions, + config, + info, + node_managers, + token: builder.token, + session_notify, + }, + handle, + )) } - /// Runs the supplied server and blocks until it completes either by aborting or - /// by error. - pub fn run_server(server: Arc>) { - let single_threaded_executor = { - let server = trace_read_lock!(server); - let server_state = trace_read_lock!(server.server_state); - let config = trace_read_lock!(server_state.config); - config.performance.single_threaded_executor - }; - let server_task = Self::new_server_task(server); - // Launch - let mut builder = if !single_threaded_executor { - tokio::runtime::Builder::new_multi_thread() - } else { - tokio::runtime::Builder::new_current_thread() - }; - let runtime = builder.enable_all().build().unwrap(); - Self::run_server_on_runtime(runtime, server_task, true); + /// Get a reference to the SubscriptionCache containing all subscriptions on the server. 
+ pub fn subscriptions(&self) -> Arc { + self.subscriptions.clone() } - /// Allow the server to be run on a caller supplied runtime. If block is set, the task - /// runs to completion (abort or by error), otherwise, the task is spawned and a join handle is - /// returned by the function. Spawning might be suitable if the runtime is being used for other - /// async tasks. - pub fn run_server_on_runtime( - runtime: tokio::runtime::Runtime, - server_task: F, - block: bool, - ) -> Option::Output>> - where - F: std::future::Future + Send + 'static, - F::Output: Send + 'static, - { - if block { - runtime.block_on(server_task); - info!("Server has finished"); - None - } else { - Some(runtime.spawn(server_task)) - } - } + async fn initialize_node_managers(&self, context: &ServerContext) -> Result<(), String> { + info!("Initializing node managers"); + { + if self.node_managers.is_empty() { + return Err("No node managers defined, server is invalid".to_string()); + } - /// Returns the main server task - the loop that waits for connections and processes them. 
- pub async fn new_server_task(server: Arc>) { - // Get the address and discovery url - let (sock_addr, discovery_server_url) = { - let server = trace_read_lock!(server); - - // Debug endpoints - server.log_endpoint_info(); - - let sock_addr = server.get_socket_address(); - let server_state = trace_read_lock!(server.server_state); - let config = trace_read_lock!(server_state.config); - - // Discovery url must be present and valid - let discovery_server_url = - if let Some(ref discovery_server_url) = config.discovery_server_url { - if is_valid_opc_ua_url(discovery_server_url) { - Some(discovery_server_url.clone()) - } else { - None - } - } else { - None - }; + let mut type_tree = trace_write_lock!(self.info.type_tree); - (sock_addr, discovery_server_url) - }; - match sock_addr { - None => { - error!("Cannot resolve server address, check configuration of server"); + for mgr in self.node_managers.iter() { + mgr.init(&mut *type_tree, context.clone()).await; } - Some(sock_addr) => Self::server_task(server, sock_addr, discovery_server_url).await, } + Ok(()) } - async fn server_task( - server: Arc>, - sock_addr: A, - discovery_server_url: Option, - ) { - // This is returned as the main server task - info!("Waiting for Connection"); - // Listen for connections (or abort) - let listener = match TcpListener::bind(&sock_addr).await { - Ok(listener) => listener, - Err(err) => { - panic!("Could not bind to socket {:?}", err) + async fn run_discovery_server_registration(info: Arc) -> Never { + let registered_server = info.registered_server(); + let Some(discovery_server_url) = info.config.discovery_server_url.as_ref() else { + loop { + futures::future::pending::<()>().await; } }; + periodic_discovery_server_registration( + discovery_server_url, + registered_server, + info.config.pki_dir.clone(), + Duration::from_secs(5 * 60), + ) + .await + } - let (tx_abort, rx_abort) = oneshot::channel(); + /// Run the server using a given TCP listener. 
+ /// Note that the configured TCP endpoint is still used to create the endpoint + /// descriptions, you must properly set `host` and `port` even when using this. + /// + /// This is useful for testing, as you can bind a `TcpListener` to port `0` auto-assign + /// a port. + pub async fn run_with(mut self, listener: TcpListener) -> Result<(), String> { + let context = ServerContext { + node_managers: self.node_managers.as_weak(), + subscriptions: self.subscriptions.clone(), + info: self.info.clone(), + authenticator: self.info.authenticator.clone(), + type_tree: self.info.type_tree.clone(), + }; - // Put the server into a running state - { - let mut server = trace_write_lock!(server); - // Running - { - let mut server_state = trace_write_lock!(server.server_state); - server_state.start_time = DateTime::now(); - server_state.set_state(ServerStateType::Running); - } + self.initialize_node_managers(&context).await?; - // Start a timer that registers the server with a discovery server - if let Some(ref discovery_server_url) = discovery_server_url { - server.start_discovery_server_registration_timer(discovery_server_url); - } else { - info!("Server has not set a discovery server url, so no registration will happen"); - } + self.info + .set_state(ServerState::Running, &self.subscriptions); + self.info.start_time.store(Arc::new(DateTime::now())); - // Start any pending polling action timers - server.start_pending_polling_actions(); - } + let addr = listener + .local_addr() + .map_err(|e| format!("Failed to bind socket: {e:?}"))?; + info!("Now listening for connections on {addr}"); + + self.info + .port + .store(addr.port(), std::sync::atomic::Ordering::Relaxed); - // Start a server abort task loop - Self::start_abort_poll(server.clone(), tx_abort); - - // This isn't nice syntax, but basically there are two async actions - // going on, one of which has to complete - either the listener breaks out of its - // loop, or the rx_abort receives an abort message. - tokio::select! 
{ - _ = async { - loop { - match listener.accept().await { - Ok((socket, _addr)) => { - // Clear out dead sessions - info!("Handling new connection {:?}", socket); - // Check for abort - let mut server = trace_write_lock!(server); - let is_abort = { - let server_state = trace_read_lock!(server.server_state); - server_state.is_abort() - }; - if is_abort { - info!("Server is aborting so it will not accept new connections"); - break; - } else { - server.handle_connection(socket); - } + self.log_endpoint_info(); + + let mut connection_counter = 0; + + loop { + let conn_fut = if self.connections.is_empty() { + if self.token.is_cancelled() { + break; + } + Either::Left(futures::future::pending::>>()) + } else { + Either::Right(self.connections.next()) + }; + + tokio::select! { + conn_res = conn_fut => { + match conn_res.unwrap() { + Ok(id) => { + info!("Connection {} terminated", id); + self.connection_map.remove(&id); + }, + Err(e) => error!("Connection panic! {e}") + } + } + _ = Self::run_subscription_ticks(self.config.subscription_poll_interval_ms, &context) => {} + _ = Self::run_discovery_server_registration(self.info.clone()) => {} + _ = Self::run_session_expiry(&self.session_manager, &self.session_notify) => {} + rs = listener.accept() => { + match rs { + Ok((socket, addr)) => { + info!("Accept new connection from {addr} ({connection_counter})"); + let conn = SessionController::new( + socket, + self.session_manager.clone(), + self.certificate_store.clone(), + self.info.clone(), + self.node_managers.clone(), + self.subscriptions.clone() + ); + let (send, recv) = tokio::sync::mpsc::channel(5); + let handle = tokio::spawn(conn.run(recv).map(move |_| connection_counter)); + self.connections.push(handle); + self.connection_map.insert(connection_counter, ConnectionInfo { + command_send: send + }); + connection_counter += 1; } Err(e) => { - error!("couldn't accept connection to client: {:?}", e); + error!("Failed to accept client connection: {:?}", e); } } } - // Help the 
rust type inferencer out - Ok::<_, tokio::io::Error>(()) - } => {} - _ = rx_abort => { - info!("abort received"); + _ = self.token.cancelled() => { + for conn in self.connection_map.values() { + let _ = conn.command_send.send(ControllerCommand::Close).await; + } + } } } - info!("main server task is finished"); - } - /// Returns the current [`ServerState`] for the server. - /// - /// [`ServerState`]: ../state/struct.ServerState.html - pub fn server_state(&self) -> Arc> { - self.server_state.clone() + Ok(()) } - /// Returns the `CertificateStore` for the server. - pub fn certificate_store(&self) -> Arc> { - self.certificate_store.clone() - } + /// Run the server. The provided `token` can be used to stop the server gracefully. + pub async fn run(self) -> Result<(), String> { + let addr = self.get_socket_address(); - /// Returns the [`AddressSpace`] for the server. - /// - /// [`AddressSpace`]: ../address_space/address_space/struct.AddressSpace.html - pub fn address_space(&self) -> Arc> { - self.address_space.clone() - } + let Some(addr) = addr else { + error!("Cannot resolve server address, check server configuration"); + return Err("Cannot resolve server address, check server configuration".to_owned()); + }; - /// Returns the [`Connections`] for the server. - /// - /// [`Connections`]: ./type.Connections.html - pub fn connections(&self) -> Arc> { - self.connections.clone() - } + info!("Try to bind address at {addr}"); + let listener = match TcpListener::bind(&addr).await { + Ok(listener) => listener, + Err(e) => { + error!("Failed to bind socket: {:?}", e); + return Err(format!("Failed to bind socket: {:?}", e)); + } + }; - /// Returns the [`ServerMetrics`] for the server. - /// - /// [`ServerMetrics`]: ../metrics/struct.ServerMetrics.html - pub fn server_metrics(&self) -> Arc> { - self.server_metrics.clone() + self.run_with(listener).await } - /// Returns the `single_threaded_executor` for the server. 
- pub fn single_threaded_executor(&self) -> bool { - let server_state = trace_read_lock!(self.server_state); - let config = trace_read_lock!(server_state.config); - config.performance.single_threaded_executor - } + async fn run_subscription_ticks(interval: u64, context: &ServerContext) -> Never { + if interval == 0 { + futures::future::pending().await + } else { + let context = context.clone(); + let mut tick = tokio::time::interval(Duration::from_millis(interval)); + tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + tick.tick().await; - /// Sets a flag telling the running server to abort. The abort will happen asynchronously after - /// all sessions have disconnected. - pub fn abort(&mut self) { - info!("Server has been instructed to abort"); - let mut server_state = trace_write_lock!(self.server_state); - server_state.abort(); + context.subscriptions.periodic_tick(&context).await; + } + } } - /// Strip out dead connections, i.e those which have disconnected. Returns `true` if there are - /// still open connections after this function completes. - fn remove_dead_connections(&self) -> bool { - // Go through all connections, removing those that have terminated - let mut connections = trace_write_lock!(self.connections); - connections.retain(|transport| { - // Try to obtain the lock on the transport and the session and check if session is terminated - // if it is, then we'll use its termination status to sweep it out. - let lock = transport.try_read(); - if let Some(ref transport) = lock { - let session_manager = transport.session_manager(); - let session_manager = trace_read_lock!(session_manager); - !session_manager.sessions_terminated() - } else { - true + async fn run_session_expiry(sessions: &RwLock, notify: &Notify) -> Never { + loop { + let ((expiry, expired), notified) = { + let session_lck = trace_read_lock!(sessions); + // Make sure to create the notified future while we still hold the lock. 
+ (session_lck.check_session_expiry(), notify.notified()) + }; + if !expired.is_empty() { + let mut session_lck = trace_write_lock!(sessions); + for id in expired { + session_lck.expire_session(&id); + } } - }); - !connections.is_empty() + tokio::select! { + _ = tokio::time::sleep_until(expiry.into()) => {} + _ = notified => {} + } + } } /// Log information about the endpoints on this server fn log_endpoint_info(&self) { - let server_state = trace_read_lock!(self.server_state); - let config = trace_read_lock!(server_state.config); - info!("OPC UA Server: {}", server_state.application_name); - info!("Base url: {}", server_state.base_endpoint); + info!("OPC UA Server: {}", self.info.application_name); + info!("Base url: {}", self.info.base_endpoint()); info!("Supported endpoints:"); - for (id, endpoint) in &config.endpoints { + for (id, endpoint) in &self.config.endpoints { let users: Vec = endpoint.user_token_ids.iter().cloned().collect(); let users = users.join(", "); info!("Endpoint \"{}\": {}", id, endpoint.path); @@ -466,199 +406,15 @@ impl Server { /// Returns the server socket address. fn get_socket_address(&self) -> Option { - use std::net::ToSocketAddrs; - let server_state = trace_read_lock!(self.server_state); - let config = trace_read_lock!(server_state.config); // Resolve this host / port to an address (or not) - let address = format!("{}:{}", config.tcp_config.host, config.tcp_config.port); + let address = format!( + "{}:{}", + self.config.tcp_config.host, self.config.tcp_config.port + ); if let Ok(mut addrs_iter) = address.to_socket_addrs() { addrs_iter.next() } else { None } } - - /// This timer will poll the server to see if it has aborted. It also cleans up dead connections. - /// If it determines to abort it will signal the tx_abort so that the main listener loop can - /// be broken at its convenience. 
- fn start_abort_poll(server: Arc>, tx_abort: Sender<()>) { - tokio::spawn(async move { - let mut timer = interval_at(Instant::now(), Duration::from_millis(1000)); - loop { - trace!("abort_poll_task.take_while"); - // Check if there are any open sessions - { - let server = trace_read_lock!(server); - let has_open_connections = server.remove_dead_connections(); - let server_state = trace_read_lock!(server.server_state); - // Predicate breaks on abort & no open connections - if server_state.is_abort() { - if has_open_connections { - warn!("Abort called while there were still open connections"); - } - info!("Server has aborted so, sending a command to break the listen loop"); - tx_abort.send(()).unwrap(); - break; - } - } - timer.tick().await; - } - info!("Abort poll task is finished"); - }); - } - - /// Discovery registration is disabled. - #[cfg(not(feature = "discovery-server-registration"))] - fn start_discovery_server_registration_timer(&self, discovery_server_url: &str) { - info!("Discovery server registration is disabled in code so registration with {} will not happen", discovery_server_url); - } - - /// Discovery registration runs a timer that triggers every 5 minutes and causes the server - /// to register itself with a discovery server. - #[cfg(feature = "discovery-server-registration")] - fn start_discovery_server_registration_timer(&self, discovery_server_url: &str) { - use crate::server::discovery; - - let discovery_server_url = discovery_server_url.to_string(); - info!( - "Server has set a discovery server url {} which will be used to register the server", - discovery_server_url - ); - let server_state = self.server_state.clone(); - - // The registration timer fires on a duration, so make that duration and pretend the - // last time it fired was now - duration, so it should instantly fire when polled next. 
- let register_duration = Duration::from_secs(5 * 60); - let last_registered = Instant::now() - register_duration; - let last_registered = Arc::new(Mutex::new(last_registered)); - - tokio::spawn(async move { - // Polling happens fairly quickly so task can terminate on server abort, however - // it is looking for the registration duration to have elapsed until it actually does - // anything. - let mut timer = interval_at(Instant::now(), Duration::from_millis(1000)); - loop { - trace!("discovery_server_register.take_while"); - { - let server_state = trace_read_lock!(server_state); - if !server_state.is_running() || server_state.is_abort() { - break; - } - } - - timer.tick().await; - - // Test if registration needs to happen, i.e. if this is first time around, - // or if duration has elapsed since last attempt. - trace!("discovery_server_register.for_each"); - let now = Instant::now(); - let mut last_registered = trace_lock!(last_registered); - if now.duration_since(*last_registered) >= register_duration { - *last_registered = now; - drop(last_registered); - let (is_running, pki_dir, registered_server) = { - let server_state = trace_read_lock!(server_state); - let pki_dir = { - let config = server_state.config.read(); - config.pki_dir.clone() - }; - ( - server_state.is_running(), - pki_dir, - server_state.registered_server(), - ) - }; - if is_running { - discovery::register_with_discovery_server( - &discovery_server_url, - registered_server, - pki_dir, - ) - .await; - } - } - } - info!("Discovery timer task is finished"); - }); - } - - /// Creates a polling action that happens continuously on an interval while the server - /// is running. For example, a server might run a polling action every 100ms to synchronous - /// address space state between variables and their physical backends. - /// - /// The function that is supplied does not take any arguments. 
It is expected that the - /// implementation will move any variables into the function that are required to perform its - /// action. - pub fn add_polling_action(&mut self, interval_ms: u64, action: F) - where - F: Fn() + Send + Sync + 'static, - { - // If the server is not yet running, the action is queued and is started later - let server_state = trace_read_lock!(self.server_state); - if server_state.is_abort() { - error!("Polling action added when server is aborting"); - // DO NOTHING - } else if !server_state.is_running() { - self.pending_polling_actions - .push((interval_ms, Box::new(action))); - } else { - // Start the action immediately - let _ = PollingAction::spawn(self.server_state.clone(), interval_ms, move || { - // Call the provided closure with the address space - action(); - }); - } - } - - /// Starts any polling actions which were queued ready to start but not yet - fn start_pending_polling_actions(&mut self) { - let server_state = self.server_state.clone(); - self.pending_polling_actions - .drain(..) - .for_each(|(interval_ms, action)| { - debug!( - "Starting a pending polling action at rate of {} ms", - interval_ms - ); - let _ = PollingAction::spawn(server_state.clone(), interval_ms, move || { - // Call the provided action - action(); - }); - }); - } - - /// Create a new transport. 
- pub fn new_transport(&self) -> TcpTransport { - TcpTransport::new( - self.certificate_store.clone(), - self.server_state.clone(), - self.address_space.clone(), - self.session_manager.clone(), - ) - } - - /// Handles the incoming request - fn handle_connection(&mut self, socket: TcpStream) { - trace!("Connection thread spawning"); - - // Spawn a task for the connection - let connection = Arc::new(RwLock::new(self.new_transport())); - { - let mut connections = trace_write_lock!(self.connections); - connections.push(connection.clone()); - } - - // Looping interval has to cope with whatever sampling rate server needs - let looping_interval_ms = { - let server_state = trace_read_lock!(self.server_state); - // Get the minimum interval in ms - f64::min( - server_state.min_publishing_interval_ms, - server_state.min_sampling_interval_ms, - ) - }; - - // Run adds a session task to the tokio session - TcpTransport::run(connection, socket, looping_interval_ms); - } } diff --git a/lib/src/server/server_handle.rs b/lib/src/server/server_handle.rs new file mode 100644 index 000000000..00cc12412 --- /dev/null +++ b/lib/src/server/server_handle.rs @@ -0,0 +1,109 @@ +use std::sync::{atomic::AtomicU8, Arc}; + +use tokio_util::sync::CancellationToken; + +use crate::{ + sync::RwLock, + types::{AttributeId, DataValue, ServerState, VariableId}, +}; + +use super::{ + info::ServerInfo, + node_manager::{NodeManagers, TypeTree}, + session::manager::SessionManager, + SubscriptionCache, +}; + +/// Reference to a server instance containing tools to modify the server +/// while it is running. 
+#[derive(Clone)] +pub struct ServerHandle { + info: Arc, + service_level: Arc, + subscriptions: Arc, + node_managers: NodeManagers, + session_manager: Arc>, + type_tree: Arc>, + token: CancellationToken, +} + +impl ServerHandle { + pub(crate) fn new( + info: Arc, + service_level: Arc, + subscriptions: Arc, + node_managers: NodeManagers, + session_manager: Arc>, + type_tree: Arc>, + token: CancellationToken, + ) -> Self { + Self { + info, + service_level, + subscriptions, + node_managers, + session_manager, + type_tree, + token, + } + } + + /// Get a reference to the ServerInfo, containing configuration and other shared server data. + pub fn info(&self) -> &Arc { + &self.info + } + + /// Get a reference to the subscription cache. + pub fn subscriptions(&self) -> &Arc { + &self.subscriptions + } + + /// Set the service level, properly notifying subscribed clients of the change. + pub fn set_service_level(&self, sl: u8) { + self.service_level + .store(sl, std::sync::atomic::Ordering::Relaxed); + self.subscriptions.notify_data_change( + [( + DataValue::new_now(sl), + &VariableId::Server_ServiceLevel.into(), + AttributeId::Value, + )] + .into_iter(), + ); + } + + /// Get a reference to the node managers on the server. + pub fn node_managers(&self) -> &NodeManagers { + &self.node_managers + } + + /// Get a reference to the session manager, containing all currently active sessions. + pub fn session_manager(&self) -> &RwLock { + &self.session_manager + } + + /// Get a reference to the type tree, containing shared information about types in the server. + pub fn type_tree(&self) -> &RwLock { + &self.type_tree + } + + /// Set the server state. Note that this does not do anything beyond just setting + /// the state and notifying clients. + pub fn set_server_state(&self, state: ServerState) { + self.info.set_state(state, &self.subscriptions); + } + + /// Get the cancellation token. 
+ pub fn token(&self) -> &CancellationToken { + &self.token + } + + /// Signal the server to stop. + pub fn cancel(&self) { + self.token.cancel(); + } + + pub fn get_namespace_index(&self, namespace: &str) -> Option { + self.type_tree.read().namespaces().get_index(namespace) + } +} diff --git a/lib/src/server/services/attribute.rs b/lib/src/server/services/attribute.rs deleted file mode 100644 index faa06fa56..000000000 --- a/lib/src/server/services/attribute.rs +++ /dev/null @@ -1,857 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{result::Result, sync::Arc}; - -use crate::{ - core::supported_message::SupportedMessage, - sync::*, - types::{status_code::StatusCode, *}, -}; - -use crate::server::{ - address_space::{ - node::{HasNodeId, NodeBase, NodeType}, - variable::Variable, - AddressSpace, UserAccessLevel, - }, - services::Service, - session::Session, - state::ServerState, -}; - -enum ReadDetails { - ReadEventDetails(ReadEventDetails), - ReadRawModifiedDetails(ReadRawModifiedDetails), - ReadProcessedDetails(ReadProcessedDetails), - ReadAtTimeDetails(ReadAtTimeDetails), -} - -enum UpdateDetails { - UpdateDataDetails(UpdateDataDetails), - UpdateStructureDataDetails(UpdateStructureDataDetails), - UpdateEventDetails(UpdateEventDetails), - DeleteRawModifiedDetails(DeleteRawModifiedDetails), - DeleteAtTimeDetails(DeleteAtTimeDetails), - DeleteEventDetails(DeleteEventDetails), -} - -/// The attribute service. Allows attributes to be read and written from the address space. -pub(crate) struct AttributeService {} - -impl Service for AttributeService { - fn name(&self) -> String { - String::from("AttributeService") - } -} - -impl AttributeService { - pub fn new() -> AttributeService { - AttributeService {} - } - - /// Used to read historical values or Events of one or more Nodes. 
For - /// constructed Attribute values whose elements are indexed, such as an array, this Service - /// allows Clients to read the entire set of indexed values as a composite, to read individual - /// elements or to read ranges of elements of the composite. Servers may make historical - /// values available to Clients using this Service, although the historical values themselves - /// are not visible in the AddressSpace. - pub fn read( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &ReadRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.nodes_to_read) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else if request.max_age < 0f64 { - // Negative values are invalid for max_age - warn!("ReadRequest max age is invalid"); - self.service_fault(&request.request_header, StatusCode::BadMaxAgeInvalid) - } else if request.timestamps_to_return == TimestampsToReturn::Invalid { - warn!("ReadRequest invalid timestamps to return"); - self.service_fault( - &request.request_header, - StatusCode::BadTimestampsToReturnInvalid, - ) - } else { - let server_state = trace_read_lock!(server_state); - let nodes_to_read = request.nodes_to_read.as_ref().unwrap(); - if nodes_to_read.len() <= server_state.operational_limits.max_nodes_per_read { - // Read nodes and their attributes - let session = trace_read_lock!(session); - let address_space = trace_read_lock!(address_space); - let timestamps_to_return = request.timestamps_to_return; - let results = nodes_to_read - .iter() - .map(|node_to_read| { - Self::read_node_value( - &session, - &address_space, - node_to_read, - request.max_age, - timestamps_to_return, - ) - }) - .collect(); - - let diagnostic_infos = None; - let response = ReadResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos, - }; - response.into() - } else { - warn!("ReadRequest too many nodes to read {}", 
nodes_to_read.len()); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } - } - - /// Used to read historical values - pub fn history_read( - &self, - server_state: Arc>, - _session: Arc>, - address_space: Arc>, - request: &HistoryReadRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.nodes_to_read) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let decoding_options = { - let server_state = trace_read_lock!(server_state); - server_state.decoding_options() - }; - match Self::do_history_read_details( - &decoding_options, - server_state, - address_space, - request, - ) { - Ok(results) => { - let diagnostic_infos = None; - let response = HistoryReadResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos, - }; - response.into() - } - Err(status_code) => self.service_fault(&request.request_header, status_code), - } - } - } - - /// Used to write values to one or more Attributes of one or more Nodes. For - /// constructed Attribute values whose elements are indexed, such as an array, this Service - /// allows Clients to write the entire set of indexed values as a composite, to write individual - /// elements or to write ranges of elements of the composite. 
- pub fn write( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &WriteRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.nodes_to_write) { - debug!("Empty list passed to write {:?}", request); - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - // TODO audit - generate AuditWriteUpdateEventType event - let server_state = trace_read_lock!(server_state); - let session = trace_read_lock!(session); - let mut address_space = trace_write_lock!(address_space); - - let nodes_to_write = request.nodes_to_write.as_ref().unwrap(); - if nodes_to_write.len() <= server_state.operational_limits.max_nodes_per_write { - let results = nodes_to_write - .iter() - .map(|node_to_write| { - Self::write_node_value(&session, &mut address_space, node_to_write) - }) - .collect(); - - let diagnostic_infos = None; - WriteResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos, - } - .into() - } else { - warn!( - "WriteRequest too many nodes to write {}", - nodes_to_write.len() - ); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } - } - - /// Used to update or update historical values - pub fn history_update( - &self, - server_state: Arc>, - _session: Arc>, - address_space: Arc>, - request: &HistoryUpdateRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.history_update_details) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - // TODO audit - generate AuditHistoryUpdateEventType event - let decoding_options = { - let server_state = trace_read_lock!(server_state); - server_state.decoding_options() - }; - let history_update_details = request.history_update_details.as_ref().unwrap(); - let results = history_update_details - .iter() - .map(|u| { - // Decode the update/delete action - let (status_code, operation_results) = 
Self::do_history_update_details( - &decoding_options, - server_state.clone(), - address_space.clone(), - u, - ); - HistoryUpdateResult { - status_code, - operation_results, - diagnostic_infos: None, - } - }) - .collect(); - HistoryUpdateResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - } - .into() - } - } - - fn node_id_to_action(node_id: &NodeId, actions: &[ObjectId]) -> Result { - let object_id = node_id.as_object_id().map_err(|_| ())?; - actions.iter().find(|v| object_id == **v).copied().ok_or(()) - } - - fn node_id_to_historical_read_action(node_id: &NodeId) -> Result { - Self::node_id_to_action( - node_id, - &[ - ObjectId::ReadEventDetails_Encoding_DefaultBinary, - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, - ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, - ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, - ], - ) - } - - fn node_id_to_historical_update_action(node_id: &NodeId) -> Result { - Self::node_id_to_action( - node_id, - &[ - ObjectId::UpdateDataDetails_Encoding_DefaultBinary, - ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, - ObjectId::UpdateEventDetails_Encoding_DefaultBinary, - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, - ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, - ObjectId::DeleteEventDetails_Encoding_DefaultBinary, - ], - ) - } - - fn decode_history_read_details( - history_read_details: &ExtensionObject, - decoding_options: &DecodingOptions, - ) -> Result { - let action = Self::node_id_to_historical_read_action(&history_read_details.node_id) - .map_err(|_| StatusCode::BadHistoryOperationInvalid)?; - match action { - ObjectId::ReadEventDetails_Encoding_DefaultBinary => Ok(ReadDetails::ReadEventDetails( - history_read_details.decode_inner::(decoding_options)?, - )), - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary => { - Ok(ReadDetails::ReadRawModifiedDetails( - history_read_details - 
.decode_inner::(decoding_options)?, - )) - } - ObjectId::ReadProcessedDetails_Encoding_DefaultBinary => { - Ok(ReadDetails::ReadProcessedDetails( - history_read_details.decode_inner::(decoding_options)?, - )) - } - ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary => { - Ok(ReadDetails::ReadAtTimeDetails( - history_read_details.decode_inner::(decoding_options)?, - )) - } - _ => panic!(), - } - } - - fn decode_history_update_details( - history_update_details: &ExtensionObject, - decoding_options: &DecodingOptions, - ) -> Result { - let action = Self::node_id_to_historical_update_action(&history_update_details.node_id) - .map_err(|_| StatusCode::BadHistoryOperationInvalid)?; - match action { - ObjectId::UpdateDataDetails_Encoding_DefaultBinary => { - Ok(UpdateDetails::UpdateDataDetails( - history_update_details.decode_inner::(decoding_options)?, - )) - } - ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary => { - Ok(UpdateDetails::UpdateStructureDataDetails( - history_update_details - .decode_inner::(decoding_options)?, - )) - } - ObjectId::UpdateEventDetails_Encoding_DefaultBinary => { - Ok(UpdateDetails::UpdateEventDetails( - history_update_details.decode_inner::(decoding_options)?, - )) - } - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary => { - Ok(UpdateDetails::DeleteRawModifiedDetails( - history_update_details - .decode_inner::(decoding_options)?, - )) - } - ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary => { - Ok(UpdateDetails::DeleteAtTimeDetails( - history_update_details.decode_inner::(decoding_options)?, - )) - } - ObjectId::DeleteEventDetails_Encoding_DefaultBinary => { - Ok(UpdateDetails::DeleteEventDetails( - history_update_details.decode_inner::(decoding_options)?, - )) - } - _ => panic!(), - } - } - - fn do_history_update_details( - decoding_options: &DecodingOptions, - server_state: Arc>, - address_space: Arc>, - u: &ExtensionObject, - ) -> (StatusCode, Option>) { - match Self::decode_history_update_details(u, 
decoding_options) { - Ok(details) => { - let server_state = trace_read_lock!(server_state); - // Call the provider (data or event) - let result = match details { - UpdateDetails::UpdateDataDetails(details) => { - if let Some(historical_data_provider) = - server_state.historical_data_provider.as_ref() - { - historical_data_provider.update_data_details(address_space, details) - } else { - Err(StatusCode::BadHistoryOperationUnsupported) - } - } - UpdateDetails::UpdateStructureDataDetails(details) => { - if let Some(historical_data_provider) = - server_state.historical_data_provider.as_ref() - { - historical_data_provider - .update_structure_data_details(address_space, details) - } else { - Err(StatusCode::BadHistoryOperationUnsupported) - } - } - UpdateDetails::UpdateEventDetails(details) => { - if let Some(historical_event_provider) = - server_state.historical_event_provider.as_ref() - { - historical_event_provider.update_event_details(address_space, details) - } else { - Err(StatusCode::BadHistoryOperationUnsupported) - } - } - UpdateDetails::DeleteRawModifiedDetails(details) => { - if let Some(historical_data_provider) = - server_state.historical_data_provider.as_ref() - { - historical_data_provider - .delete_raw_modified_details(address_space, details) - } else { - Err(StatusCode::BadHistoryOperationUnsupported) - } - } - UpdateDetails::DeleteAtTimeDetails(details) => { - if let Some(historical_data_provider) = - server_state.historical_data_provider.as_ref() - { - historical_data_provider.delete_at_time_details(address_space, details) - } else { - Err(StatusCode::BadHistoryOperationUnsupported) - } - } - UpdateDetails::DeleteEventDetails(details) => { - if let Some(historical_event_provider) = - server_state.historical_event_provider.as_ref() - { - historical_event_provider.delete_event_details(address_space, details) - } else { - Err(StatusCode::BadHistoryOperationUnsupported) - } - } - }; - match result { - Ok(operation_results) => (StatusCode::Good, 
Some(operation_results)), - Err(status_code) => (status_code, None), - } - } - Err(status_code) => (status_code, None), - } - } - - fn do_history_read_details( - decoding_options: &DecodingOptions, - server_state: Arc>, - address_space: Arc>, - request: &HistoryReadRequest, - ) -> Result, StatusCode> { - // TODO enforce operation limits - - // Validate the action being performed - let nodes_to_read = &request.nodes_to_read.as_ref().unwrap(); - let timestamps_to_return = request.timestamps_to_return; - let release_continuation_points = request.release_continuation_points; - let read_details = - Self::decode_history_read_details(&request.history_read_details, decoding_options)?; - - let server_state = trace_read_lock!(server_state); - let results = match read_details { - ReadDetails::ReadEventDetails(details) => { - let historical_event_provider = server_state - .historical_event_provider - .as_ref() - .ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_event_provider.read_event_details( - address_space, - details, - timestamps_to_return, - release_continuation_points, - nodes_to_read, - )? - } - ReadDetails::ReadRawModifiedDetails(details) => { - let historical_data_provider = server_state - .historical_data_provider - .as_ref() - .ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_data_provider.read_raw_modified_details( - address_space, - details, - timestamps_to_return, - release_continuation_points, - nodes_to_read, - )? - } - ReadDetails::ReadProcessedDetails(details) => { - let historical_data_provider = server_state - .historical_data_provider - .as_ref() - .ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_data_provider.read_processed_details( - address_space, - details, - timestamps_to_return, - release_continuation_points, - nodes_to_read, - )? 
- } - ReadDetails::ReadAtTimeDetails(details) => { - let historical_data_provider = server_state - .historical_data_provider - .as_ref() - .ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_data_provider.read_at_time_details( - address_space, - details, - timestamps_to_return, - release_continuation_points, - nodes_to_read, - )? - } - }; - Ok(results) - } - - fn is_supported_data_encoding(data_encoding: &QualifiedName) -> bool { - if data_encoding.is_null() { - true - } else { - data_encoding.namespace_index == 0 && data_encoding.name.eq("Default Binary") - } - } - - fn read_node_value( - session: &Session, - address_space: &AddressSpace, - node_to_read: &ReadValueId, - max_age: f64, - timestamps_to_return: TimestampsToReturn, - ) -> DataValue { - // Node node found - // debug!("read_node_value asked to read node id {}, attribute {}", node_to_read.node_id, node_to_read.attribute_id); - let mut result_value = DataValue::null(); - if let Some(node) = address_space.find_node(&node_to_read.node_id) { - if let Ok(attribute_id) = AttributeId::from_u32(node_to_read.attribute_id) { - let index_range = match node_to_read - .index_range - .as_ref() - .parse::() - .map_err(|_| StatusCode::BadIndexRangeInvalid) - { - Ok(index_range) => index_range, - Err(err) => { - return DataValue { - value: None, - status: Some(err), - source_timestamp: None, - source_picoseconds: None, - server_timestamp: None, - server_picoseconds: None, - }; - } - }; - - if !Self::is_readable(session, node, attribute_id) { - // Can't read this node - debug!( - "read_node_value result for read node id {}, attribute {} is unreadable", - node_to_read.node_id, node_to_read.attribute_id - ); - result_value.status = Some(StatusCode::BadNotReadable); - } else if attribute_id != AttributeId::Value && index_range != NumericRange::None { - // Can't supply an index range on a non-Value attribute - debug!( - "read_node_value result for read node id {}, attribute {} is invalid range", - 
node_to_read.node_id, node_to_read.attribute_id - ); - result_value.status = Some(StatusCode::BadIndexRangeNoData); - } else if !Self::is_supported_data_encoding(&node_to_read.data_encoding) { - // Caller must request binary - debug!("read_node_value result for read node id {}, attribute {} is invalid data encoding", node_to_read.node_id, node_to_read.attribute_id); - result_value.status = Some(StatusCode::BadDataEncodingInvalid); - } else if let Some(attribute) = node.as_node().get_attribute_max_age( - timestamps_to_return, - attribute_id, - index_range, - &node_to_read.data_encoding, - max_age, - ) { - // If caller was reading the user access level, this needs to be modified to - // take account of the effective level based on who is logged in. - let value = if attribute_id == AttributeId::UserAccessLevel { - if let Some(value) = attribute.value { - if let Variant::Byte(value) = value { - // The bits from the node are further modified by the session - let user_access_level = UserAccessLevel::from_bits_truncate(value); - let user_access_level = session.effective_user_access_level( - user_access_level, - &node.node_id(), - attribute_id, - ); - Some(Variant::from(user_access_level.bits())) - } else { - Some(value) - } - } else { - None - } - } else { - attribute.value.clone() - }; - - // Result value is clone from the attribute - result_value.value = value; - result_value.status = attribute.status; - - if let Some(status_code) = attribute.status { - if status_code.is_bad() { - debug!("read_node_value result for read node id {}, attribute {} is bad {}", node_to_read.node_id, node_to_read.attribute_id, status_code); - } - } - - // Timestamps to return only applies to variable value - if let NodeType::Variable(_) = node { - if attribute_id == AttributeId::Value { - match timestamps_to_return { - TimestampsToReturn::Source => { - result_value.source_timestamp = attribute.source_timestamp; - result_value.source_picoseconds = attribute.source_picoseconds; - } - 
TimestampsToReturn::Server => { - result_value.server_timestamp = attribute.server_timestamp; - result_value.server_picoseconds = attribute.server_picoseconds; - } - TimestampsToReturn::Both => { - result_value.source_timestamp = attribute.source_timestamp; - result_value.source_picoseconds = attribute.source_picoseconds; - result_value.server_timestamp = attribute.server_timestamp; - result_value.server_picoseconds = attribute.server_picoseconds; - } - TimestampsToReturn::Neither | TimestampsToReturn::Invalid => { - // Nothing needs to change - } - } - } - } - } else { - debug!( - "read_node_value result for read node id {}, attribute {} is invalid/1", - node_to_read.node_id, node_to_read.attribute_id - ); - result_value.status = Some(StatusCode::BadAttributeIdInvalid); - } - } else { - debug!( - "read_node_value result for read node id {}, attribute {} is invalid/2", - node_to_read.node_id, node_to_read.attribute_id - ); - result_value.status = Some(StatusCode::BadAttributeIdInvalid); - } - } else { - debug!( - "read_node_value result for read node id {}, attribute {} cannot find node", - node_to_read.node_id, node_to_read.attribute_id - ); - result_value.status = Some(StatusCode::BadNodeIdUnknown); - } - result_value - } - - fn user_access_level( - session: &Session, - node: &NodeType, - attribute_id: AttributeId, - ) -> UserAccessLevel { - let user_access_level = if let NodeType::Variable(ref node) = node { - node.user_access_level() - } else { - UserAccessLevel::CURRENT_READ - }; - session.effective_user_access_level(user_access_level, &node.node_id(), attribute_id) - } - - fn is_readable(session: &Session, node: &NodeType, attribute_id: AttributeId) -> bool { - // TODO session for current user - // Check for access level, user access level - Self::user_access_level(session, node, attribute_id).contains(UserAccessLevel::CURRENT_READ) - } - - fn is_writable(session: &Session, node: &NodeType, attribute_id: AttributeId) -> bool { - // TODO session for current 
user - // For a variable, the access level controls access to the variable - if let NodeType::Variable(_) = node { - if attribute_id == AttributeId::Value { - return Self::user_access_level(session, node, attribute_id) - .contains(UserAccessLevel::CURRENT_WRITE); - } - } - - if let Some(write_mask) = node.as_node().write_mask() { - match attribute_id { - AttributeId::Value => { - if let NodeType::VariableType(_) = node { - write_mask.contains(WriteMask::VALUE_FOR_VARIABLE_TYPE) - } else { - false - } - } - AttributeId::NodeId => write_mask.contains(WriteMask::NODE_ID), - AttributeId::NodeClass => write_mask.contains(WriteMask::NODE_CLASS), - AttributeId::BrowseName => write_mask.contains(WriteMask::BROWSE_NAME), - AttributeId::DisplayName => write_mask.contains(WriteMask::DISPLAY_NAME), - AttributeId::Description => write_mask.contains(WriteMask::DESCRIPTION), - AttributeId::WriteMask => write_mask.contains(WriteMask::WRITE_MASK), - AttributeId::UserWriteMask => write_mask.contains(WriteMask::USER_WRITE_MASK), - AttributeId::IsAbstract => write_mask.contains(WriteMask::IS_ABSTRACT), - AttributeId::Symmetric => write_mask.contains(WriteMask::SYMMETRIC), - AttributeId::InverseName => write_mask.contains(WriteMask::INVERSE_NAME), - AttributeId::ContainsNoLoops => write_mask.contains(WriteMask::CONTAINS_NO_LOOPS), - AttributeId::EventNotifier => write_mask.contains(WriteMask::EVENT_NOTIFIER), - AttributeId::DataType => write_mask.contains(WriteMask::DATA_TYPE), - AttributeId::ValueRank => write_mask.contains(WriteMask::VALUE_RANK), - AttributeId::ArrayDimensions => write_mask.contains(WriteMask::ARRAY_DIMENSIONS), - AttributeId::AccessLevel => write_mask.contains(WriteMask::ACCESS_LEVEL), - AttributeId::UserAccessLevel => write_mask.contains(WriteMask::USER_ACCESS_LEVEL), - AttributeId::MinimumSamplingInterval => { - write_mask.contains(WriteMask::MINIMUM_SAMPLING_INTERVAL) - } - AttributeId::Historizing => write_mask.contains(WriteMask::HISTORIZING), - 
AttributeId::Executable => write_mask.contains(WriteMask::EXECUTABLE), - AttributeId::UserExecutable => write_mask.contains(WriteMask::USER_EXECUTABLE), - AttributeId::DataTypeDefinition => { - write_mask.contains(WriteMask::DATA_TYPE_DEFINITION) - } - AttributeId::RolePermissions => write_mask.contains(WriteMask::ROLE_PERMISSIONS), - AttributeId::AccessRestrictions => { - write_mask.contains(WriteMask::ACCESS_RESTRICTIONS) - } - AttributeId::AccessLevelEx => write_mask.contains(WriteMask::ACCESS_LEVEL_EX), - AttributeId::UserRolePermissions => false, // Reserved - } - } else { - false - } - } - - /* - fn is_history_readable(session: &Session, node: &NodeType) -> bool { - Self::user_access_level(session, node, AttributeId::Value).contains(UserAccessLevel::HISTORY_READ) - } - - fn is_history_updateable(session: &Session, node: &NodeType) -> bool { - Self::user_access_level(session, node, AttributeId::Value).contains(UserAccessLevel::HISTORY_WRITE) - } - */ - - /// Determine if the value is writable to a Variable node's data type - fn validate_value_to_write( - address_space: &AddressSpace, - variable: &Variable, - value: &Variant, - ) -> bool { - // Get the value rank and data type of the variable - let value_rank = variable.value_rank(); - let node_data_type = variable.data_type(); - - let valid = if let Variant::Empty = value { - // Assigning an empty value is permissible - true - } else if let Some(value_data_type) = value.scalar_data_type() { - // Value is scalar. Check if the data type matches - let data_type_matches = address_space.is_subtype(&value_data_type, &node_data_type); - if !data_type_matches { - // Check if the value to write is a byte string and the receiving node type a byte array. 
- // This code is a mess just for some weird edge case in the spec that a write from - // a byte string to a byte array should succeed - match value { - Variant::ByteString(_) => { - if node_data_type == DataTypeId::Byte.into() { - match value_rank { - -2 | -3 | 1 => true, - _ => false, - } - } else { - false - } - } - _ => data_type_matches, - } - } else { - true - } - } else if let Some(value_data_type) = value.array_data_type() { - // TODO check that value is array of same dimensions - address_space.is_subtype(&value_data_type, &node_data_type) - } else { - // Value should have a data type - false - }; - if !valid { - debug!("Variable value validation did not pass, check value {:?} against var {} data type of {}", value, variable.node_id(), node_data_type); - } - valid - } - - fn write_node_value( - session: &Session, - address_space: &mut AddressSpace, - node_to_write: &WriteValue, - ) -> StatusCode { - if let Some(node) = address_space.find_node(&node_to_write.node_id) { - if let Ok(attribute_id) = AttributeId::from_u32(node_to_write.attribute_id) { - let index_range = node_to_write.index_range.as_ref().parse::(); - - if !Self::is_writable(session, node, attribute_id) { - StatusCode::BadNotWritable - } else if attribute_id != AttributeId::Value && !node_to_write.index_range.is_null() - { - // Index ranges are not supported on anything other than a value attribute - error!("Server does not support indexes for attributes other than Value"); - StatusCode::BadWriteNotSupported - // else if node_to_write.value.server_timestamp.is_some() || node_to_write.value.server_picoseconds.is_some() || - // node_to_write.value.source_timestamp.is_some() || node_to_write.value.source_picoseconds.is_some() { - // error!("Server does not support timestamps in write"); - // StatusCode::BadWriteNotSupported - } else if index_range.is_err() { - error!("Index range is invalid"); - StatusCode::BadIndexRangeInvalid - } else if let Some(ref value) = node_to_write.value.value { - let 
index_range = index_range.unwrap(); - - // This is a band-aid for Variable::Value which should check if the data type - // matches the written value. Note, that ALL attributes should check for subtypes - // but they don't. There should be a general purpose fn attribute_type(attribute_id) helper - // on the node impl that returns a datatype for the attribute regardless of node. - let data_type_valid = if attribute_id == AttributeId::Value { - match node { - NodeType::Variable(ref variable) => { - Self::validate_value_to_write(address_space, variable, value) - } - _ => true, // Other types don't have this attr but they will reject later during set - } - } else { - true - }; - if !data_type_valid { - error!("Data type of value is invalid for writing to attribute"); - StatusCode::BadTypeMismatch - } else { - let node = address_space.find_node_mut(&node_to_write.node_id).unwrap(); - let result = if attribute_id == AttributeId::Value { - match node { - NodeType::Variable(ref mut variable) => variable - .set_value(index_range, value.clone()) - .map_err(|err| { - error!( - "Value could not be set to node {} Value, error = {}", - node_to_write.node_id, err - ); - err - }), - _ => Err(StatusCode::BadAttributeIdInvalid), - } - } else { - let node = node.as_mut_node(); - node.set_attribute(attribute_id, value.clone()) - .map_err(|err| { - error!("Value could not be set to node {} attribute {:?}, error = {}", node_to_write.node_id, attribute_id, err); - err - }) - }; - if let Err(err) = result { - err - } else { - StatusCode::Good - } - } - } else { - error!("Server does not support missing value in write"); - StatusCode::BadTypeMismatch - } - } else { - warn!("Attribute id {} is invalid", node_to_write.attribute_id); - StatusCode::BadAttributeIdInvalid - } - } else { - warn!("Cannot find node id {}", node_to_write.node_id); - StatusCode::BadNodeIdUnknown - } - } -} diff --git a/lib/src/server/services/audit.rs b/lib/src/server/services/audit.rs deleted file mode 100644 index 
96d22c827..000000000 --- a/lib/src/server/services/audit.rs +++ /dev/null @@ -1,150 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::prelude::SecureChannel; -use crate::server::{ - address_space::address_space::AddressSpace, - events::audit::{certificate_events::*, session_events::*}, - session::Session, - state::ServerState, -}; - -fn next_node_id(address_space: Arc>) -> NodeId { - let audit_namespace = { - let address_space = trace_read_lock!(address_space); - address_space.audit_namespace() - }; - NodeId::next_numeric(audit_namespace) -} - -pub fn log_create_session( - server_state: &ServerState, - secure_channel: &SecureChannel, - session: &Session, - address_space: Arc>, - status: bool, - revised_session_timeout: Duration, - request: &CreateSessionRequest, -) { - let node_id = next_node_id(address_space); - let now = DateTime::now(); - - // Raise an event - let event = AuditCreateSessionEventType::new(node_id, now) - .status(status) - .client_audit_entry_id(request.request_header.audit_entry_id.clone()); - - let event = if status { - let session_id = session.session_id().clone(); - let secure_channel_id = format!("{}", secure_channel.secure_channel_id()); - - let event = event - .session_id(session_id) - .secure_channel_id(secure_channel_id) - .revised_session_timeout(revised_session_timeout); - - // Client certificate info - if let Some(ref client_certificate) = session.client_certificate() { - event.client_certificate(client_certificate) - } else { - event - } - } else { - event - }; - - let _ = server_state.raise_and_log(event); -} - -pub fn log_activate_session( - secure_channel: &SecureChannel, - server_state: &ServerState, - session: &Session, - address_space: Arc>, - status: bool, - request: &ActivateSessionRequest, -) { - let node_id = next_node_id(address_space); - let now = 
DateTime::now(); - - let session_id = session.session_id().clone(); - let secure_channel_id = format!("{}", secure_channel.secure_channel_id()); - let event = AuditActivateSessionEventType::new(node_id, now) - .status(status) - .session_id(session_id) - .client_audit_entry_id(request.request_header.audit_entry_id.clone()) - .secure_channel_id(secure_channel_id); - - let event = if status { - // Client software certificates - let event = - if let Some(ref client_software_certificates) = request.client_software_certificates { - event.client_software_certificates(client_software_certificates.clone()) - } else { - event - }; - - // TODO user identity token - should we serialize the entire token in an audit log, or just the policy uri? - // from a security perspective, logging credentials is bad. - - event - } else { - event - }; - - let _ = server_state.raise_and_log(event); -} - -pub fn log_close_session( - server_state: &ServerState, - session: &Session, - address_space: Arc>, - status: bool, - request: &CloseSessionRequest, -) { - let node_id = next_node_id(address_space); - let now = DateTime::now(); - - let session_id = session.session_id().clone(); - let event = AuditSessionEventType::new_close_session( - node_id, - now, - AuditCloseSessionReason::CloseSession, - ) - .status(status) - .client_user_id(session.client_user_id()) - .client_audit_entry_id(request.request_header.audit_entry_id.clone()) - .session_id(session_id); - - let _ = server_state.raise_and_log(event); -} - -pub fn log_certificate_error( - server_state: &ServerState, - address_space: Arc>, - status_code: StatusCode, - request_header: &RequestHeader, -) { - let node_id = next_node_id(address_space); - let now = DateTime::now(); - - match status_code.sub_code() { - SubStatusCode::BadCertificateTimeInvalid => { - let event = AuditCertificateExpiredEventType::new(node_id, now) - .client_audit_entry_id(request_header.audit_entry_id.clone()); - let _ = server_state.raise_and_log(event); - } - _ => { - 
// TODO client_id - let event = AuditCertificateInvalidEventType::new(node_id, now) - .client_audit_entry_id(request_header.audit_entry_id.clone()); - let _ = server_state.raise_and_log(event); - } - }; -} diff --git a/lib/src/server/services/discovery.rs b/lib/src/server/services/discovery.rs deleted file mode 100644 index 0428f2e21..000000000 --- a/lib/src/server/services/discovery.rs +++ /dev/null @@ -1,102 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::core::{config::Config, supported_message::SupportedMessage}; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::state::ServerState; - -use super::Service; - -/// The discovery service. Allows a server to return the endpoints that it supports. -pub(crate) struct DiscoveryService; - -impl Service for DiscoveryService { - fn name(&self) -> String { - String::from("DiscoveryService") - } -} - -impl DiscoveryService { - pub fn new() -> DiscoveryService { - DiscoveryService {} - } - - pub fn get_endpoints( - &self, - server_state: Arc>, - request: &GetEndpointsRequest, - ) -> SupportedMessage { - let server_state = trace_read_lock!(server_state); - - // TODO some of the arguments in the request are ignored - // localeIds - list of locales to use for human readable strings (in the endpoint descriptions) - - // TODO audit - generate event for failed service invocation - - let endpoints = server_state.endpoints(&request.endpoint_url, &request.profile_uris); - GetEndpointsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - endpoints, - } - .into() - } - - pub fn register_server( - &self, - _server_state: Arc>, - request: &RegisterServerRequest, - ) -> SupportedMessage { - self.service_fault(&request.request_header, StatusCode::BadNotSupported) - } - - pub fn register_server2( - &self, - _server_state: Arc>, - request: &RegisterServer2Request, - ) -> 
SupportedMessage { - self.service_fault(&request.request_header, StatusCode::BadNotSupported) - } - - pub fn find_servers( - &self, - server_state: Arc>, - request: &FindServersRequest, - ) -> SupportedMessage { - let server_state = trace_read_lock!(server_state); - - let application_description = { - let config = trace_read_lock!(server_state.config); - config.application_description() - }; - - // Fields within the request - - let mut servers = vec![application_description]; - - // TODO endpoint URL - - // TODO localeids, filter out servers that do not support locale ids - - // Filter servers that do not have a matching application uri - if let Some(ref server_uris) = request.server_uris { - if !server_uris.is_empty() { - // Filter the servers down - servers - .retain(|server| server_uris.iter().any(|uri| *uri == server.application_uri)); - } - } - - let servers = Some(servers); - - FindServersResponse { - response_header: ResponseHeader::new_good(&request.request_header), - servers, - } - .into() - } -} diff --git a/lib/src/server/services/message_handler.rs b/lib/src/server/services/message_handler.rs deleted file mode 100644 index 296c2d73b..000000000 --- a/lib/src/server/services/message_handler.rs +++ /dev/null @@ -1,633 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use chrono::Utc; - -use crate::core::comms::secure_channel::SecureChannel; -use crate::core::supported_message::SupportedMessage; -use crate::crypto::CertificateStore; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::{ - address_space::AddressSpace, - comms::tcp_transport::MessageSender, - services::{ - attribute::AttributeService, discovery::DiscoveryService, method::MethodService, - monitored_item::MonitoredItemService, node_management::NodeManagementService, - query::QueryService, session::SessionService, subscription::SubscriptionService, - view::ViewService, - }, - 
session::{Session, SessionManager}, - session_diagnostics::*, - state::ServerState, -}; - -/// Processes and dispatches messages for handling -pub(crate) struct MessageHandler { - /// Secure channel - secure_channel: Arc>, - /// Certificate store for certs - certificate_store: Arc>, - /// Server state - server_state: Arc>, - /// Address space - address_space: Arc>, - /// Session state - session_manager: Arc>, - /// Attribute service - attribute_service: AttributeService, - /// Discovery service - discovery_service: DiscoveryService, - /// Node Management service - node_management_service: NodeManagementService, - /// Method service - method_service: MethodService, - /// MonitoredItem service - monitored_item_service: MonitoredItemService, - /// Query service - query_service: QueryService, - /// Session service - session_service: SessionService, - /// Subscription service - subscription_service: SubscriptionService, - /// View service - view_service: ViewService, -} - -impl MessageHandler { - pub fn new( - secure_channel: Arc>, - certificate_store: Arc>, - server_state: Arc>, - session_manager: Arc>, - address_space: Arc>, - ) -> MessageHandler { - MessageHandler { - secure_channel, - certificate_store, - server_state, - session_manager, - address_space, - attribute_service: AttributeService::new(), - discovery_service: DiscoveryService::new(), - method_service: MethodService::new(), - monitored_item_service: MonitoredItemService::new(), - node_management_service: NodeManagementService::new(), - query_service: QueryService::new(), - session_service: SessionService::new(), - view_service: ViewService::new(), - subscription_service: SubscriptionService::new(), - } - } - - pub fn handle_message( - &mut self, - request_id: u32, - message: &SupportedMessage, - sender: &MessageSender, - ) -> Result<(), StatusCode> { - // Note the order of arguments for all these services is the order that they must be locked in, - // - // 1. ServerState - // 2. Session - // 3. 
AddressSpace - - let server_state = self.server_state.clone(); - let address_space = self.address_space.clone(); - - let response = match message { - // Discovery Service Set, OPC UA Part 4, Section 5.4 - SupportedMessage::GetEndpointsRequest(request) => { - Some(self.discovery_service.get_endpoints(server_state, request)) - } - - SupportedMessage::RegisterServerRequest(request) => Some( - self.discovery_service - .register_server(server_state, request), - ), - - SupportedMessage::RegisterServer2Request(request) => Some( - self.discovery_service - .register_server2(server_state, request), - ), - - SupportedMessage::FindServersRequest(request) => { - Some(self.discovery_service.find_servers(server_state, request)) - } - - // Session Service Set, OPC UA Part 4, Section 5.6 - SupportedMessage::CreateSessionRequest(request) => { - let mut session_manager = trace_write_lock!(self.session_manager); - - // TODO this is completely arbitrary - 5 sessions total in a single connection - pub(crate) const MAX_SESSIONS_PER_TRANSPORT: usize = 5; - - let response = if session_manager.len() >= MAX_SESSIONS_PER_TRANSPORT { - ServiceFault::new(&request.request_header, StatusCode::BadTooManySessions) - .into() - } else { - let (session, response) = self.session_service.create_session( - self.secure_channel.clone(), - self.certificate_store.clone(), - server_state, - address_space, - request, - ); - if let Some(session) = session { - session_manager.register_session(Arc::new(RwLock::new(session))); - } - response - }; - Some(response) - } - SupportedMessage::CloseSessionRequest(request) => { - let secure_channel = self.secure_channel.clone(); - Some(self.session_service.close_session( - secure_channel, - self.session_manager.clone(), - server_state, - address_space, - request, - )) - } - - // NOTE - ALL THE REQUESTS BEYOND THIS POINT MUST BE VALIDATED AGAINST THE SESSION - SupportedMessage::ActivateSessionRequest(request) => self - .validate_activate_service_request(message, "", 
|session| { - let secure_channel = self.secure_channel.clone(); - self.session_service.activate_session( - secure_channel, - server_state, - session, - address_space, - request, - ) - }), - - // NOTE - ALL THE REQUESTS BEYOND THIS POINT MUST BE VALIDATED AGAINST THE SESSION AND - // HAVE AN ACTIVE SESSION - SupportedMessage::CancelRequest(request) => { - self.validate_service_request(message, "", |session, _| { - Some(self.session_service.cancel(server_state, session, request)) - }) - } - - // NodeManagement Service Set, OPC UA Part 4, Section 5.7 - SupportedMessage::AddNodesRequest(request) => { - self.validate_service_request(message, ADD_NODES_COUNT, |session, _| { - Some(self.node_management_service.add_nodes( - server_state, - session, - address_space, - request, - )) - }) - } - - SupportedMessage::AddReferencesRequest(request) => { - self.validate_service_request(message, ADD_REFERENCES_COUNT, |session, _| { - Some(self.node_management_service.add_references( - server_state, - session, - address_space, - request, - )) - }) - } - - SupportedMessage::DeleteNodesRequest(request) => { - self.validate_service_request(message, DELETE_NODES_COUNT, |session, _| { - Some(self.node_management_service.delete_nodes( - server_state, - session, - address_space, - request, - )) - }) - } - - SupportedMessage::DeleteReferencesRequest(request) => { - self.validate_service_request(message, DELETE_REFERENCES_COUNT, |session, _| { - Some(self.node_management_service.delete_references( - server_state, - session, - address_space, - request, - )) - }) - } - - // View Service Set, OPC UA Part 4, Section 5.8 - SupportedMessage::BrowseRequest(request) => { - self.validate_service_request(message, BROWSE_COUNT, |session, _| { - Some( - self.view_service - .browse(server_state, session, address_space, request), - ) - }) - } - SupportedMessage::BrowseNextRequest(request) => { - self.validate_service_request(message, BROWSE_NEXT_COUNT, |session, _| { - Some( - self.view_service - 
.browse_next(session, address_space, request), - ) - }) - } - SupportedMessage::TranslateBrowsePathsToNodeIdsRequest(request) => self - .validate_service_request( - message, - TRANSLATE_BROWSE_PATHS_TO_NODE_IDS_COUNT, - |_, _| { - Some(self.view_service.translate_browse_paths_to_node_ids( - server_state, - address_space, - request, - )) - }, - ), - SupportedMessage::RegisterNodesRequest(request) => { - self.validate_service_request(message, REGISTER_NODES_COUNT, |session, _| { - Some( - self.view_service - .register_nodes(server_state, session, request), - ) - }) - } - SupportedMessage::UnregisterNodesRequest(request) => { - self.validate_service_request(message, UNREGISTER_NODES_COUNT, |session, _| { - Some( - self.view_service - .unregister_nodes(server_state, session, request), - ) - }) - } - - // Query Service Set, OPC UA Part 4, Section 5.9 - SupportedMessage::QueryFirstRequest(request) => { - self.validate_service_request(message, READ_COUNT, |session, _| { - Some(self.query_service.query_first( - server_state, - session, - address_space, - request, - )) - }) - } - - SupportedMessage::QueryNextRequest(request) => { - self.validate_service_request(message, READ_COUNT, |session, _| { - Some(self.query_service.query_next( - server_state, - session, - address_space, - request, - )) - }) - } - - // Attribute Service Set, OPC UA Part 4, Section 5.10 - SupportedMessage::ReadRequest(request) => { - self.validate_service_request(message, READ_COUNT, |session, _| { - Some( - self.attribute_service - .read(server_state, session, address_space, request), - ) - }) - } - SupportedMessage::HistoryReadRequest(request) => { - self.validate_service_request(message, HISTORY_READ_COUNT, |session, _| { - Some(self.attribute_service.history_read( - server_state, - session, - address_space, - request, - )) - }) - } - SupportedMessage::WriteRequest(request) => { - self.validate_service_request(message, WRITE_COUNT, |session, _| { - Some(self.attribute_service.write( - server_state, 
- session, - address_space, - request, - )) - }) - } - SupportedMessage::HistoryUpdateRequest(request) => { - self.validate_service_request(message, HISTORY_UPDATE_COUNT, |session, _| { - Some(self.attribute_service.history_update( - server_state, - session, - address_space, - request, - )) - }) - } - - // Method Service Set, OPC UA Part 4, Section 5.11 - SupportedMessage::CallRequest(request) => { - self.validate_service_request(message, CALL_COUNT, |session, session_manager| { - let session_id = { - let session = trace_read_lock!(session); - session.session_id().clone() - }; - Some(self.method_service.call( - server_state, - &session_id, - session_manager, - address_space, - request, - )) - }) - } - - // Monitored Item Service Set, OPC UA Part 4, Section 5.12 - SupportedMessage::CreateMonitoredItemsRequest(request) => self - .validate_service_request(message, CREATE_MONITORED_ITEMS_COUNT, |session, _| { - Some(self.monitored_item_service.create_monitored_items( - server_state, - session, - address_space, - request, - )) - }), - SupportedMessage::ModifyMonitoredItemsRequest(request) => self - .validate_service_request(message, MODIFY_MONITORED_ITEMS_COUNT, |session, _| { - Some(self.monitored_item_service.modify_monitored_items( - server_state, - session, - address_space, - request, - )) - }), - SupportedMessage::SetMonitoringModeRequest(request) => { - self.validate_service_request(message, SET_MONITORING_MODE_COUNT, |session, _| { - Some( - self.monitored_item_service - .set_monitoring_mode(session, request), - ) - }) - } - SupportedMessage::SetTriggeringRequest(request) => { - self.validate_service_request(message, SET_TRIGGERING_COUNT, |session, _| { - Some(self.monitored_item_service.set_triggering(session, request)) - }) - } - SupportedMessage::DeleteMonitoredItemsRequest(request) => self - .validate_service_request(message, DELETE_MONITORED_ITEMS_COUNT, |session, _| { - Some( - self.monitored_item_service - .delete_monitored_items(session, request), - ) - 
}), - - // Subscription Service Set, OPC UA Part 4, Section 5.13 - SupportedMessage::CreateSubscriptionRequest(request) => { - self.validate_service_request(message, CREATE_SUBSCRIPTION_COUNT, |session, _| { - Some(self.subscription_service.create_subscription( - server_state, - session, - request, - )) - }) - } - SupportedMessage::ModifySubscriptionRequest(request) => { - self.validate_service_request(message, MODIFY_SUBSCRIPTION_COUNT, |session, _| { - Some(self.subscription_service.modify_subscription( - server_state, - session, - request, - )) - }) - } - SupportedMessage::SetPublishingModeRequest(request) => { - self.validate_service_request(message, SET_PUBLISHING_MODE_COUNT, |session, _| { - Some( - self.subscription_service - .set_publishing_mode(session, request), - ) - }) - } - SupportedMessage::DeleteSubscriptionsRequest(request) => { - self.validate_service_request(message, DELETE_SUBSCRIPTIONS_COUNT, |session, _| { - Some( - self.subscription_service - .delete_subscriptions(session, request), - ) - }) - } - SupportedMessage::TransferSubscriptionsRequest(request) => self - .validate_service_request(message, TRANSFER_SUBSCRIPTIONS_COUNT, |session, _| { - Some( - self.subscription_service - .transfer_subscriptions(session, request), - ) - }), - SupportedMessage::PublishRequest(request) => { - self.validate_service_request(message, "", |session, _| { - // TODO publish request diagnostics have to be done asynchronously too - - // Unlike other calls which return immediately, this one is asynchronous - the - // request is queued and the response will come back out of sequence some time in - // the future. 
- self.subscription_service.async_publish( - &Utc::now(), - session, - address_space, - request_id, - request, - ) - }) - } - SupportedMessage::RepublishRequest(request) => { - self.validate_service_request(message, REPUBLISH_COUNT, |session, _| { - Some(self.subscription_service.republish(session, request)) - }) - } - - // Unhandle messages - message => { - debug!( - "Message handler does not handle this kind of message {:?}", - message - ); - return Err(StatusCode::BadServiceUnsupported); - } - }; - - if let Some(response) = response { - let _ = sender.send_message(request_id, response); - } - - Ok(()) - } - - /// Tests if this request should be rejected because of a session timeout - fn is_session_timed_out( - session: Arc>, - request_header: &RequestHeader, - now: DateTimeUtc, - ) -> Result<(), SupportedMessage> { - let mut session = trace_write_lock!(session); - let last_service_request_timestamp = session.last_service_request_timestamp(); - let elapsed = now - last_service_request_timestamp; - if elapsed.num_milliseconds() as f64 > session.session_timeout() - && session.session_timeout() > 0.0 - { - session.terminate_session(); - error!("Session has timed out because too much time has elapsed between service calls - elapsed time = {}ms", elapsed.num_milliseconds()); - Err(ServiceFault::new(request_header, StatusCode::BadSessionIdInvalid).into()) - } else { - Ok(()) - } - } - - /// Test if the session is activated - fn is_session_activated( - &self, - session: Arc>, - request_header: &RequestHeader, - ) -> Result<(), SupportedMessage> { - let session = trace_read_lock!(session); - if !session.is_activated() { - error!("Session is not activated so request fails"); - Err(ServiceFault::new(request_header, StatusCode::BadSessionNotActivated).into()) - } else { - // Ensure the session's secure channel - let secure_channel_id = { - let secure_channel = trace_read_lock!(self.secure_channel); - secure_channel.secure_channel_id() - }; - if secure_channel_id != 
session.secure_channel_id() { - error!( - "service call rejected as secure channel id does not match that on the session" - ); - Err(ServiceFault::new(request_header, StatusCode::BadSessionIdInvalid).into()) - } else { - Ok(()) - } - } - } - - /// Validate the security of the call - fn validate_activate_service_request( - &self, - request: &SupportedMessage, - diagnostic_key: &'static str, - action: F, - ) -> Option - where - F: FnOnce(Arc>) -> SupportedMessage, - { - let now = Utc::now(); - let request_header = request.request_header(); - - // Look up the session from a map to see if it exists - let session = { - let session_manager = trace_read_lock!(self.session_manager); - session_manager.find_session_by_token(&request_header.authentication_token) - }; - if let Some(session) = session { - let (response, authorized) = if let Err(response) = - Self::is_session_timed_out(session.clone(), request_header, now) - { - (response, false) - } else { - let response = action(session.clone()); - let mut session = trace_write_lock!(session); - session.set_last_service_request_timestamp(now); - (response, true) - }; - Self::diag_service_response(session, authorized, &response, diagnostic_key); - Some(response) - } else { - warn!( - "validate_activate_service_request, session not found for token {}", - &request_header.authentication_token - ); - Some(ServiceFault::new(request_header, StatusCode::BadSessionIdInvalid).into()) - } - } - - /// Validate the security of the call and also for an active session - fn validate_service_request( - &self, - request: &SupportedMessage, - diagnostic_key: &'static str, - action: F, - ) -> Option - where - F: FnOnce(Arc>, Arc>) -> Option, - { - let now = Utc::now(); - let request_header = request.request_header(); - // Look up the session from a map to see if it exists - let session_manager = self.session_manager.clone(); - let session = { - let session_manager = trace_read_lock!(session_manager); - 
session_manager.find_session_by_token(&request_header.authentication_token) - }; - if let Some(session) = session { - let (response, authorized) = - if let Err(response) = self.is_session_activated(session.clone(), request_header) { - (Some(response), false) - } else if let Err(response) = - Self::is_session_timed_out(session.clone(), request_header, now) - { - (Some(response), false) - } else { - let response = action(session.clone(), session_manager); - let mut session = trace_write_lock!(session); - session.set_last_service_request_timestamp(now); - (response, true) - }; - // Async calls may not return a response here - response.map(|response| { - Self::diag_service_response(session, authorized, &response, diagnostic_key); - response - }) - } else { - Some(ServiceFault::new(request_header, StatusCode::BadSessionIdInvalid).into()) - } - } - - /// Increment count of request in session diagnostics - fn diag_authorized_request(session_diagnostics: &mut SessionDiagnostics, authorized: bool) { - if authorized { - session_diagnostics.request(); - } else { - session_diagnostics.unauthorized_request(); - } - } - - /// Increment count of service call in session diagnostics - fn diag_service_response( - session: Arc>, - authorized: bool, - response: &SupportedMessage, - diagnostic_key: &'static str, - ) { - let session = trace_read_lock!(session); - let session_diagnostics = session.session_diagnostics(); - let mut session_diagnostics = trace_write_lock!(session_diagnostics); - Self::diag_authorized_request(&mut session_diagnostics, authorized); - if !diagnostic_key.is_empty() { - let service_success = if let SupportedMessage::ServiceFault(_response) = response { - false - } else { - true - }; - if service_success { - session_diagnostics.service_success(diagnostic_key); - } else { - session_diagnostics.service_error(diagnostic_key); - } - } - } -} diff --git a/lib/src/server/services/method.rs b/lib/src/server/services/method.rs deleted file mode 100644 index 
806ccbbee..000000000 --- a/lib/src/server/services/method.rs +++ /dev/null @@ -1,95 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::core::supported_message::SupportedMessage; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::{ - address_space::AddressSpace, services::Service, session::SessionManager, state::ServerState, -}; - -/// The method service. Allows a client to call a method on the server. -pub(crate) struct MethodService; - -impl Service for MethodService { - fn name(&self) -> String { - String::from("MethodService") - } -} - -impl MethodService { - pub fn new() -> MethodService { - MethodService {} - } - - pub fn call( - &self, - server_state: Arc>, - session_id: &NodeId, - session_manager: Arc>, - address_space: Arc>, - request: &CallRequest, - ) -> SupportedMessage { - if let Some(ref calls) = request.methods_to_call { - let server_state = trace_read_lock!(server_state); - if calls.len() <= server_state.operational_limits.max_nodes_per_method_call { - let mut address_space = trace_write_lock!(address_space); - - let results: Vec = calls - .iter() - .map(|request| { - trace!( - "Calling to {:?} on {:?}", - request.method_id, - request.object_id - ); - - // Note: Method invocations that modify the address space, write a value, or modify the - // state of the system (acknowledge, batch sequencing or other system changes) must - // generate an AuditUpdateMethodEventType or a subtype of it. 
- - // Call the method via whatever is registered in the address space - match address_space.call_method( - &server_state, - session_id, - session_manager.clone(), - request, - ) { - Ok(response) => response, - Err(status_code) => { - // Call didn't work for some reason - error!( - "Call to {:?} on {:?} failed with status code {}", - request.method_id, request.object_id, status_code - ); - CallMethodResult { - status_code, - input_argument_results: None, - input_argument_diagnostic_infos: None, - output_arguments: None, - } - } - } - }) - .collect(); - // Produce response - let response = CallResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - }; - response.into() - } else { - error!("Call request, too many calls {}", calls.len()); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } else { - warn!("Call has nothing to do"); - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } -} diff --git a/lib/src/server/services/mod.rs b/lib/src/server/services/mod.rs deleted file mode 100644 index d21054c0b..000000000 --- a/lib/src/server/services/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::core::supported_message::SupportedMessage; -use crate::types::{status_code::StatusCode, RequestHeader, ServiceFault}; - -pub mod message_handler; - -/// The implementation of a service, or a set of services will implement this trait -trait Service { - fn name(&self) -> String; - - fn service_fault( - &self, - request_header: &RequestHeader, - service_result: StatusCode, - ) -> SupportedMessage { - warn!( - "Service {}, request handle {} generated a service fault with status code {}", - self.name(), - request_header.request_handle, - service_result - ); - ServiceFault::new(request_header, service_result).into() - } -} - -pub mod attribute; -pub 
mod discovery; -pub mod method; -pub mod monitored_item; -pub mod node_management; -pub mod query; -pub mod session; -pub mod subscription; -pub mod view; - -mod audit; diff --git a/lib/src/server/services/monitored_item.rs b/lib/src/server/services/monitored_item.rs deleted file mode 100644 index bee6e2a84..000000000 --- a/lib/src/server/services/monitored_item.rs +++ /dev/null @@ -1,234 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::core::supported_message::SupportedMessage; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::{ - address_space::AddressSpace, services::Service, session::Session, state::ServerState, -}; - -/// The monitored item service. Allows client to create, modify and delete monitored items on a subscription. -pub(crate) struct MonitoredItemService; - -impl Service for MonitoredItemService { - fn name(&self) -> String { - String::from("MonitoredItemService") - } -} - -impl MonitoredItemService { - pub fn new() -> MonitoredItemService { - MonitoredItemService {} - } - - /// Implementation of CreateMonitoredItems service. 
See OPC Unified Architecture, Part 4 5.12.2 - pub fn create_monitored_items( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &CreateMonitoredItemsRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.items_to_create) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let server_state = trace_read_lock!(server_state); - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - - let items_to_create = request.items_to_create.as_ref().unwrap(); - // Find subscription and add items to it - if let Some(subscription) = session.subscriptions_mut().get_mut(request.subscription_id) - { - let now = chrono::Utc::now(); - let results = Some(subscription.create_monitored_items( - &server_state, - &address_space, - &now, - request.timestamps_to_return, - items_to_create, - )); - let response = CreateMonitoredItemsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos: None, - }; - response.into() - } else { - // No matching subscription - self.service_fault( - &request.request_header, - StatusCode::BadSubscriptionIdInvalid, - ) - } - } - } - - /// Implementation of ModifyMonitoredItems service. 
See OPC Unified Architecture, Part 4 5.12.3 - pub fn modify_monitored_items( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &ModifyMonitoredItemsRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.items_to_modify) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let server_state = trace_read_lock!(server_state); - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - let items_to_modify = request.items_to_modify.as_ref().unwrap(); - // Find subscription and modify items in it - let subscription_id = request.subscription_id; - if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { - let results = Some(subscription.modify_monitored_items( - &server_state, - &address_space, - request.timestamps_to_return, - items_to_modify, - )); - ModifyMonitoredItemsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos: None, - } - .into() - } else { - // No matching subscription - self.service_fault( - &request.request_header, - StatusCode::BadSubscriptionIdInvalid, - ) - } - } - } - - /// Implementation of SetMonitoringMode service. 
See OPC Unified Architecture, Part 4 5.12.4 - pub fn set_monitoring_mode( - &self, - session: Arc>, - request: &SetMonitoringModeRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.monitored_item_ids) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut session = trace_write_lock!(session); - let monitored_item_ids = request.monitored_item_ids.as_ref().unwrap(); - let subscription_id = request.subscription_id; - if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { - let monitoring_mode = request.monitoring_mode; - let results = monitored_item_ids - .iter() - .map(|i| subscription.set_monitoring_mode(*i, monitoring_mode)) - .collect(); - SetMonitoringModeResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - } - .into() - } else { - self.service_fault( - &request.request_header, - StatusCode::BadSubscriptionIdInvalid, - ) - } - } - } - - /// Implementation of SetTriggering service. See OPC Unified Architecture, Part 4 5.12.5 - pub fn set_triggering( - &self, - session: Arc>, - request: &SetTriggeringRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.links_to_add) - && is_empty_option_vec!(request.links_to_remove) - { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut session = trace_write_lock!(session); - let links_to_add = match request.links_to_add { - Some(ref links_to_add) => &links_to_add[..], - None => &[], - }; - let links_to_remove = match request.links_to_remove { - Some(ref links_to_remove) => &links_to_remove[..], - None => &[], - }; - - // Set the triggering on the subscription. 
- let subscription_id = request.subscription_id; - if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { - match subscription.set_triggering( - request.triggering_item_id, - links_to_add, - links_to_remove, - ) { - Ok((add_results, remove_results)) => { - let response = SetTriggeringResponse { - response_header: ResponseHeader::new_good(&request.request_header), - add_results: if request.links_to_add.is_some() { - Some(add_results) - } else { - None - }, - add_diagnostic_infos: None, - remove_results: if request.links_to_remove.is_some() { - Some(remove_results) - } else { - None - }, - remove_diagnostic_infos: None, - }; - response.into() - } - Err(err) => self.service_fault(&request.request_header, err), - } - } else { - self.service_fault( - &request.request_header, - StatusCode::BadSubscriptionIdInvalid, - ) - } - } - } - - /// Implementation of DeleteMonitoredItems service. See OPC Unified Architecture, Part 4 5.12.6 - pub fn delete_monitored_items( - &self, - session: Arc>, - request: &DeleteMonitoredItemsRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.monitored_item_ids) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut session = trace_write_lock!(session); - let monitored_item_ids = request.monitored_item_ids.as_ref().unwrap(); - // Find subscription and delete items from it - let subscription_id = request.subscription_id; - if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { - let results = Some(subscription.delete_monitored_items(monitored_item_ids)); - let diagnostic_infos = None; - let response = DeleteMonitoredItemsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos, - }; - response.into() - } else { - // No matching subscription - self.service_fault( - &request.request_header, - StatusCode::BadSubscriptionIdInvalid, - ) - } - } - } -} diff --git 
a/lib/src/server/services/node_management.rs b/lib/src/server/services/node_management.rs deleted file mode 100644 index 298dcaa48..000000000 --- a/lib/src/server/services/node_management.rs +++ /dev/null @@ -1,533 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{result::Result, sync::Arc}; - -use crate::core::supported_message::SupportedMessage; -use crate::sync::*; -use crate::types::{node_ids::ObjectId, status_code::StatusCode, *}; - -use crate::server::{ - address_space::{relative_path, types::*, AddressSpace}, - services::Service, - session::Session, - state::ServerState, -}; - -pub(crate) struct NodeManagementService; - -impl Service for NodeManagementService { - fn name(&self) -> String { - String::from("NodeManagementService") - } -} - -impl NodeManagementService { - pub fn new() -> NodeManagementService { - NodeManagementService {} - } - - /// Implements the AddNodes service - pub fn add_nodes( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &AddNodesRequest, - ) -> SupportedMessage { - // TODO audit - generate AuditAddNodesEventType event - if let Some(ref nodes_to_add) = request.nodes_to_add { - if !nodes_to_add.is_empty() { - let server_state = trace_read_lock!(server_state); - if nodes_to_add.len() - <= server_state - .operational_limits - .max_nodes_per_node_management - { - let session = trace_read_lock!(session); - let mut address_space = trace_write_lock!(address_space); - - let decoding_options = server_state.decoding_options(); - let results = nodes_to_add - .iter() - .map(|node_to_add| { - let (status_code, added_node_id) = Self::add_node( - &session, - &mut address_space, - node_to_add, - &decoding_options, - ); - AddNodesResult { - status_code, - added_node_id, - } - }) - .collect(); - let response = AddNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - }; - 
response.into() - } else { - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } - - /// Implements the AddReferences service - pub fn add_references( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &AddReferencesRequest, - ) -> SupportedMessage { - // TODO audit - generate AuditAddReferencesEventType event - if let Some(ref references_to_add) = request.references_to_add { - if !references_to_add.is_empty() { - let server_state = trace_read_lock!(server_state); - if references_to_add.len() - <= server_state - .operational_limits - .max_nodes_per_node_management - { - let session = trace_read_lock!(session); - let mut address_space = trace_write_lock!(address_space); - let results = references_to_add - .iter() - .map(|r| Self::add_reference(&session, &mut address_space, r)) - .collect(); - AddReferencesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - } - .into() - } else { - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } - - /// Implements the DeleteNodes service - pub fn delete_nodes( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &DeleteNodesRequest, - ) -> SupportedMessage { - // TODO audit - generate AuditDeleteNodesEventType event - if let Some(ref nodes_to_delete) = request.nodes_to_delete { - if !nodes_to_delete.is_empty() { - let server_state = trace_read_lock!(server_state); - if nodes_to_delete.len() - <= server_state - .operational_limits - .max_nodes_per_node_management - { - let 
session = trace_read_lock!(session); - let mut address_space = trace_write_lock!(address_space); - let results = nodes_to_delete - .iter() - .map(|node_to_delete| { - Self::delete_node(&session, &mut address_space, node_to_delete) - }) - .collect(); - let response = DeleteNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - }; - response.into() - } else { - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } - - /// Implements the DeleteReferences service - pub fn delete_references( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &DeleteReferencesRequest, - ) -> SupportedMessage { - // TODO audit - generate AuditDeleteReferencesEventType event - if let Some(ref references_to_delete) = request.references_to_delete { - if !references_to_delete.is_empty() { - let server_state = trace_read_lock!(server_state); - if references_to_delete.len() - <= server_state - .operational_limits - .max_nodes_per_node_management - { - let session = trace_read_lock!(session); - let mut address_space = trace_write_lock!(address_space); - let results = references_to_delete - .iter() - .map(|r| Self::delete_reference(&session, &mut address_space, r)) - .collect(); - DeleteReferencesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - } - .into() - } else { - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } else { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } - } - - fn create_node( - node_id: &NodeId, - node_class: NodeClass, 
- browse_name: QualifiedName, - node_attributes: &ExtensionObject, - decoding_options: &DecodingOptions, - ) -> Result { - let object_id = node_attributes - .node_id - .as_object_id() - .map_err(|_| StatusCode::BadNodeAttributesInvalid)?; - // Note we are expecting the node_class and the object id for the attributes to be for the same - // thing. If they are different, it is an error. - - match object_id { - ObjectId::ObjectAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::Object { - let attributes = node_attributes.decode_inner::(decoding_options)?; - Object::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and object node attributes are not compatible"); - Err(()) - } - } - ObjectId::VariableAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::Variable { - let attributes = node_attributes.decode_inner::(decoding_options)?; - Variable::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and variable node attributes are not compatible"); - Err(()) - } - } - ObjectId::MethodAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::Method { - let attributes = node_attributes.decode_inner::(decoding_options)?; - Method::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and method node attributes are not compatible"); - Err(()) - } - } - ObjectId::ObjectTypeAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::ObjectType { - let attributes = node_attributes.decode_inner::(decoding_options)?; - ObjectType::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and object type node attributes are not compatible"); - Err(()) - } - } - ObjectId::VariableTypeAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::VariableType { - let attributes = node_attributes.decode_inner::(decoding_options)?; - 
VariableType::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and variable type node attributes are not compatible"); - Err(()) - } - } - ObjectId::ReferenceTypeAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::ReferenceType { - let attributes = node_attributes.decode_inner::(decoding_options)?; - ReferenceType::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and reference type node attributes are not compatible"); - Err(()) - } - } - ObjectId::DataTypeAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::DataType { - let attributes = node_attributes.decode_inner::(decoding_options)?; - DataType::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and data type node attributes are not compatible"); - Err(()) - } - } - ObjectId::ViewAttributes_Encoding_DefaultBinary => { - if node_class == NodeClass::View { - let attributes = node_attributes.decode_inner::(decoding_options)?; - View::from_attributes(node_id, browse_name, attributes).map(|n| n.into()) - } else { - error!("node class and view node attributes are not compatible"); - Err(()) - } - } - _ => { - error!("create_node was called with an object id which does not match a supported type"); - Err(()) - } - }.map_err(|_| StatusCode::BadNodeAttributesInvalid) - } - - fn add_node( - session: &Session, - address_space: &mut AddressSpace, - item: &AddNodesItem, - decoding_options: &DecodingOptions, - ) -> (StatusCode, NodeId) { - if !session.can_modify_address_space() { - // No permission to modify address space - return (StatusCode::BadUserAccessDenied, NodeId::null()); - } - - let requested_new_node_id = &item.requested_new_node_id; - if requested_new_node_id.server_index != 0 { - // Server index is supposed to be 0 - error!("node cannot be created because server index is not 0"); - return (StatusCode::BadNodeIdRejected, 
NodeId::null()); - } - - if item.node_class == NodeClass::Unspecified { - error!("node cannot be created because node class is unspecified"); - return (StatusCode::BadNodeClassInvalid, NodeId::null()); - } - - if !requested_new_node_id.is_null() - && address_space.node_exists(&requested_new_node_id.node_id) - { - // If a node id is supplied, it should not already exist - error!("node cannot be created because node id already exists"); - return (StatusCode::BadNodeIdExists, NodeId::null()); - } - - // Test for invalid browse name - if item.browse_name.is_null() || item.browse_name.name.as_ref().is_empty() { - error!("node cannot be created because the browse name is invalid"); - return (StatusCode::BadBrowseNameInvalid, NodeId::null()); - } - - // Test duplicate browse name to same parent - let browse_name = if item.browse_name.namespace_index != 0 { - format!( - "{}:{}", - item.browse_name.namespace_index, - item.browse_name.name.as_ref() - ) - } else { - format!("/{}", item.browse_name.name.as_ref()) - }; - let relative_path = - RelativePath::from_str(&browse_name, &RelativePathElement::default_node_resolver) - .unwrap(); - if let Ok(nodes) = relative_path::find_nodes_relative_path( - address_space, - &item.parent_node_id.node_id, - &relative_path, - ) { - if !nodes.is_empty() { - error!("node cannot be created because the browse name is a duplicate"); - return (StatusCode::BadBrowseNameDuplicated, NodeId::null()); - } - } - - if let Ok(reference_type_id) = item.reference_type_id.as_reference_type_id() { - // Node Id was either supplied or will be generated - let new_node_id = if requested_new_node_id.is_null() { - NodeId::next_numeric(address_space.internal_namespace()) - } else { - requested_new_node_id.node_id.clone() - }; - - // TODO test data model constraint - // BadReferenceNotAllowed - - // Check the type definition is valid - if !address_space - .is_valid_type_definition(item.node_class, &item.type_definition.node_id) - { - // Type definition was either 
invalid or supplied when it should not have been supplied - error!("node cannot be created because type definition is not valid"); - return (StatusCode::BadTypeDefinitionInvalid, NodeId::null()); - } - - // Check that the parent node exists - if !item.parent_node_id.server_index == 0 - || !address_space.node_exists(&item.parent_node_id.node_id) - { - error!( - "node cannot be created because parent node id is invalid or does not exist" - ); - return (StatusCode::BadParentNodeIdInvalid, NodeId::null()); - } - - // Create a node - if let Ok(node) = Self::create_node( - &new_node_id, - item.node_class, - item.browse_name.clone(), - &item.node_attributes, - decoding_options, - ) { - // Add the node to the address space - address_space.insert( - node, - Some(&[( - &item.parent_node_id.node_id, - &reference_type_id, - ReferenceDirection::Forward, - )]), - ); - // Object / Variable types must add a reference to the type - if item.node_class == NodeClass::Object || item.node_class == NodeClass::Variable { - address_space.set_node_type(&new_node_id, item.type_definition.node_id.clone()); - } - (StatusCode::Good, new_node_id) - } else { - // Create node failed, so assume a problem with the node attributes - error!("node cannot be created because attributes / not class are not valid"); - (StatusCode::BadNodeAttributesInvalid, NodeId::null()) - } - } else { - error!("node cannot be created because reference type is invalid"); - (StatusCode::BadReferenceTypeIdInvalid, NodeId::null()) - } - } - - fn add_reference( - session: &Session, - address_space: &mut AddressSpace, - item: &AddReferencesItem, - ) -> StatusCode { - if !session.can_modify_address_space() { - // No permission to modify address space - StatusCode::BadUserAccessDenied - } else if !item.target_server_uri.is_null() { - StatusCode::BadServerUriInvalid - } else if item.target_node_id.server_index != 0 { - StatusCode::BadReferenceLocalOnly - } else if !address_space.node_exists(&item.source_node_id) { - 
StatusCode::BadSourceNodeIdInvalid - } else if !address_space.node_exists(&item.target_node_id.node_id) { - StatusCode::BadTargetNodeIdInvalid - } else if item.target_node_class == NodeClass::Unspecified { - StatusCode::BadNodeClassInvalid - } else { - if let Some(node_type) = address_space.find_node(&item.target_node_id.node_id) { - // If the target node exists the class can be compared to the one supplied - if item.target_node_class != node_type.node_class() { - return StatusCode::BadNodeClassInvalid; - } - } - if let Ok(reference_type_id) = item.reference_type_id.as_reference_type_id() { - if !address_space.has_reference( - &item.source_node_id, - &item.target_node_id.node_id, - reference_type_id, - ) { - // TODO test data model constraint - // BadReferenceNotAllowed - if item.is_forward { - address_space.insert_reference( - &item.source_node_id, - &item.target_node_id.node_id, - reference_type_id, - ); - } else { - address_space.insert_reference( - &item.target_node_id.node_id, - &item.source_node_id, - reference_type_id, - ); - } - StatusCode::Good - } else { - error!("reference cannot be added because reference is a duplicate"); - StatusCode::BadDuplicateReferenceNotAllowed - } - } else { - error!("reference cannot be added because reference type id is invalid"); - StatusCode::BadReferenceTypeIdInvalid - } - } - } - - fn delete_node( - session: &Session, - address_space: &mut AddressSpace, - item: &DeleteNodesItem, - ) -> StatusCode { - if !session.can_modify_address_space() { - // No permission to modify address space - StatusCode::BadUserAccessDenied - } else if address_space.delete(&item.node_id, item.delete_target_references) { - StatusCode::Good - } else { - error!("node cannot be deleted"); - StatusCode::BadNodeIdUnknown - } - } - - fn delete_reference( - session: &Session, - address_space: &mut AddressSpace, - item: &DeleteReferencesItem, - ) -> StatusCode { - let node_id = &item.source_node_id; - let target_node_id = &item.target_node_id.node_id; - - 
if !session.can_modify_address_space() { - // No permission to modify address space - StatusCode::BadUserAccessDenied - } else if item.target_node_id.server_index != 0 { - error!("reference cannot be added because only local references are supported"); - StatusCode::BadReferenceLocalOnly - } else if node_id.is_null() || !address_space.node_exists(node_id) { - error!("reference cannot be added because source node id is invalid"); - StatusCode::BadSourceNodeIdInvalid - } else if target_node_id.is_null() || !address_space.node_exists(target_node_id) { - error!("reference cannot be added because target node id is invalid"); - StatusCode::BadTargetNodeIdInvalid - } else if let Ok(reference_type_id) = item.reference_type_id.as_reference_type_id() { - if item.delete_bidirectional { - address_space.delete_reference(node_id, target_node_id, reference_type_id); - address_space.delete_reference(target_node_id, node_id, reference_type_id); - } else if item.is_forward { - address_space.delete_reference(node_id, target_node_id, reference_type_id); - } else { - address_space.delete_reference(target_node_id, node_id, reference_type_id); - } - StatusCode::Good - } else { - error!("reference cannot be added because reference type id is invalid"); - StatusCode::BadReferenceTypeIdInvalid - } - } -} diff --git a/lib/src/server/services/query.rs b/lib/src/server/services/query.rs deleted file mode 100644 index 00f07b25b..000000000 --- a/lib/src/server/services/query.rs +++ /dev/null @@ -1,48 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::core::supported_message::SupportedMessage; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::{ - address_space::AddressSpace, services::Service, session::Session, state::ServerState, -}; - -/// The view service. Allows the client to browse the address space of the server. 
-pub(crate) struct QueryService; - -impl Service for QueryService { - fn name(&self) -> String { - String::from("QueryService") - } -} - -impl QueryService { - pub fn new() -> QueryService { - QueryService {} - } - - pub fn query_first( - &self, - _server_state: Arc>, - _session: Arc>, - _address_space: Arc>, - request: &QueryFirstRequest, - ) -> SupportedMessage { - self.service_fault(&request.request_header, StatusCode::BadNotSupported) - } - - pub fn query_next( - &self, - _server_state: Arc>, - _session: Arc>, - _address_space: Arc>, - request: &QueryNextRequest, - ) -> SupportedMessage { - self.service_fault(&request.request_header, StatusCode::BadNotSupported) - } -} diff --git a/lib/src/server/services/session.rs b/lib/src/server/services/session.rs deleted file mode 100644 index b9575f05e..000000000 --- a/lib/src/server/services/session.rs +++ /dev/null @@ -1,433 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::core::comms::secure_channel::SecureChannel; -use crate::core::supported_message::SupportedMessage; -use crate::crypto::{self as crypto, random, CertificateStore, SecurityPolicy}; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::{ - address_space::address_space::AddressSpace, - constants, - identity_token::IdentityToken, - services::{audit, Service}, - session::{Session, SessionManager}, - state::ServerState, -}; - -/// The session service. Allows the client to create, activate and close an authenticated session with the server. 
-pub(crate) struct SessionService; - -impl Service for SessionService { - fn name(&self) -> String { - String::from("SessionService") - } -} - -impl SessionService { - pub fn new() -> SessionService { - SessionService {} - } - - pub fn create_session( - &self, - secure_channel: Arc>, - certificate_store: Arc>, - server_state: Arc>, - address_space: Arc>, - request: &CreateSessionRequest, - ) -> (Option, SupportedMessage) { - let mut session = Session::new(server_state.clone()); - let server_state = trace_write_lock!(server_state); - - debug!("Create session request {:?}", request); - - let endpoints = server_state.new_endpoint_descriptions(request.endpoint_url.as_ref()); - - // Check the args - let service_result = { - // Validate the endpoint url - if request.endpoint_url.is_null() { - error!("Create session was passed an null endpoint url"); - StatusCode::BadTcpEndpointUrlInvalid - } else { - // TODO request.endpoint_url should match hostname of server application certificate - // Find matching end points for this url - if endpoints.is_none() { - error!("Create session cannot find matching endpoints"); - StatusCode::BadTcpEndpointUrlInvalid - } else { - StatusCode::Good - } - } - }; - if service_result.is_bad() { - // Rejected - let mut diagnostics = trace_write_lock!(server_state.diagnostics); - diagnostics.on_rejected_session(); - ( - None, - self.service_fault(&request.request_header, service_result), - ) - } else { - let endpoints = endpoints.unwrap(); - - // Extract the client certificate if one is supplied - let client_certificate = - crypto::X509::from_byte_string(&request.client_certificate).ok(); - - // Check the client's certificate for validity and acceptance - let security_policy = { - let secure_channel = trace_read_lock!(secure_channel); - secure_channel.security_policy() - }; - let service_result = if security_policy != SecurityPolicy::None { - let certificate_store = trace_read_lock!(certificate_store); - let result = if let Some(ref 
client_certificate) = client_certificate { - certificate_store.validate_or_reject_application_instance_cert( - client_certificate, - security_policy, - None, - None, - ) - } else { - warn!("Certificate supplied by client is invalid"); - StatusCode::BadCertificateInvalid - }; - if result.is_bad() { - // Log an error - audit::log_certificate_error( - &server_state, - address_space.clone(), - result, - &request.request_header, - ); - - // Rejected for security reasons - let mut diagnostics = trace_write_lock!(server_state.diagnostics); - diagnostics.on_rejected_security_session(); - } - result - } else { - StatusCode::Good - }; - - let secure_channel = trace_read_lock!(secure_channel); - if service_result.is_bad() { - audit::log_create_session( - &server_state, - &secure_channel, - &session, - address_space, - false, - 0f64, - request, - ); - ( - None, - self.service_fault(&request.request_header, service_result), - ) - } else { - let session_timeout = - if request.requested_session_timeout > constants::MAX_SESSION_TIMEOUT { - constants::MAX_SESSION_TIMEOUT - } else { - request.requested_session_timeout - }; - - let max_request_message_size = constants::MAX_REQUEST_MESSAGE_SIZE; - - // Calculate a signature (assuming there is a pkey) - let server_signature = if let Some(ref pkey) = server_state.server_pkey { - crypto::create_signature_data(pkey, security_policy, &request.client_certificate, &request.client_nonce) - .unwrap_or_else(|err| { - error!("Cannot create signature data from private key, check log and error {:?}", err); - SignatureData::null() - }) - } else { - SignatureData::null() - }; - - let authentication_token = NodeId::new(0, random::byte_string(32)); - let server_nonce = security_policy.random_nonce(); - let server_certificate = server_state.server_certificate_as_byte_string(); - let server_endpoints = Some(endpoints); - - session.set_authentication_token(authentication_token.clone()); - session.set_secure_channel_id(secure_channel.secure_channel_id()); 
- session.set_session_timeout(session_timeout); - session.set_max_request_message_size(max_request_message_size); - session.set_max_response_message_size(request.max_response_message_size); - session.set_endpoint_url(request.endpoint_url.clone()); - session.set_security_policy_uri(security_policy.to_uri()); - session.set_user_identity(IdentityToken::None); - session.set_client_certificate(client_certificate); - session.set_session_nonce(server_nonce.clone()); - session.set_session_name(request.session_name.clone()); - - audit::log_create_session( - &server_state, - &secure_channel, - &session, - address_space.clone(), - true, - session_timeout, - request, - ); - - // Create a session id in the address space - session.register_session(address_space); - - let response = CreateSessionResponse { - response_header: ResponseHeader::new_good(&request.request_header), - session_id: session.session_id().clone(), - authentication_token, - revised_session_timeout: session_timeout, - server_nonce, - server_certificate, - server_endpoints, - server_software_certificates: None, - server_signature, - max_request_message_size, - } - .into(); - - (Some(session), response) - } - } - } - - pub fn activate_session( - &self, - secure_channel: Arc>, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &ActivateSessionRequest, - ) -> SupportedMessage { - let server_state = trace_write_lock!(server_state); - let mut session = trace_write_lock!(session); - let endpoint_url = session.endpoint_url().as_ref(); - - let (security_policy, security_mode, secure_channel_id) = { - let secure_channel = trace_read_lock!(secure_channel); - ( - secure_channel.security_policy(), - secure_channel.security_mode(), - secure_channel.secure_channel_id(), - ) - }; - - let server_nonce = security_policy.random_nonce(); - let mut service_result = if !server_state.endpoint_exists( - endpoint_url, - security_policy, - security_mode, - ) { - // Need an endpoint - error!( - "activate_session, 
Endpoint does not exist for requested url & mode {}, {:?} / {:?}", - endpoint_url, security_policy, security_mode - ); - StatusCode::BadTcpEndpointUrlInvalid - } else if security_policy != SecurityPolicy::None { - // Crypto see 5.6.3.1 verify the caller is the same caller as create_session by validating - // signature supplied by the client during the create. - Self::verify_client_signature( - security_policy, - &server_state, - &session, - &request.client_signature, - ) - } else { - // No cert checks for no security - StatusCode::Good - }; - - if service_result.is_good() { - if let Err(err) = server_state.authenticate_endpoint( - request, - endpoint_url, - security_policy, - security_mode, - &request.user_identity_token, - session.session_nonce(), - ) { - error!("activate_session, invalid endpoint"); - service_result = err; - } - } - - // From spec: When the ActivateSession Service is called for the first time then the Server - // shall reject the request if the SecureChannel is not same as the one associated with the - // CreateSession request. Subsequent calls to ActivateSession may be associated with - // different SecureChannels. If this is the case then the Server shall verify that - // the Certificate the Client used to create the new SecureChannel is the same as the - // Certificate used to create the original SecureChannel. In addition, the Server shall - // verify that the Client supplied a UserIdentityToken that is identical to the token - // currently associated with the Session. Once the Server accepts the new SecureChannel - // it shall reject requests sent via the old SecureChannel. 
- - if service_result.is_good() { - if !session.is_activated() && session.secure_channel_id() != secure_channel_id { - error!("activate session, rejected secure channel id {} for inactive session does not match one used to create session, {}", secure_channel_id, session.secure_channel_id()); - service_result = StatusCode::BadSecureChannelIdInvalid - } else { - // TODO additional secure channel validation here for client certificate and user identity - // token - } - } - - // Authenticate the user identity token - if service_result.is_good() { - info!( - "activate_session success for session {}", - session.session_id() - ); - - session.set_activated(true); - session.set_secure_channel_id(secure_channel_id); - session.set_session_nonce(server_nonce); - session.set_user_identity(IdentityToken::new( - &request.user_identity_token, - &server_state.decoding_options(), - )); - session.set_locale_ids(request.locale_ids.clone()); - - let diagnostic_infos = None; - - { - let secure_channel = trace_read_lock!(secure_channel); - audit::log_activate_session( - &secure_channel, - &server_state, - &session, - address_space, - true, - request, - ); - } - - ActivateSessionResponse { - response_header: ResponseHeader::new_good(&request.request_header), - server_nonce: session.session_nonce().clone(), - results: None, - diagnostic_infos, - } - .into() - } else { - error!("activate_session error, fault = {:?}", service_result); - session.set_activated(false); - self.service_fault(&request.request_header, service_result) - } - } - - pub fn close_session( - &self, - secure_channel: Arc>, - session_manager: Arc>, - server_state: Arc>, - address_space: Arc>, - request: &CloseSessionRequest, - ) -> SupportedMessage { - info!( - "close_session with authentication token {}", - &request.request_header.authentication_token - ); - - let session = { - let session_manager = trace_read_lock!(session_manager); - session_manager.find_session_by_token(&request.request_header.authentication_token) - 
}; - - if let Some(session) = session { - let secure_channel_id = { - let secure_channel = trace_read_lock!(secure_channel); - secure_channel.secure_channel_id() - }; - - { - let server_state = trace_write_lock!(server_state); - let mut session = trace_write_lock!(session); - - // From spec: When the CloseSession Service is called before the Session is - // successfully activated, the Server shall reject the request if the - // SecureChannel is not the same as the one associated with the CreateSession request. - - if !session.is_activated() && session.secure_channel_id() != secure_channel_id { - error!("close_session rejected, secure channel id {} for inactive session does not match one used to create session, {}", secure_channel_id, session.secure_channel_id()); - return self.service_fault( - &request.request_header, - StatusCode::BadSecureChannelIdInvalid, - ); - } - - session.set_authentication_token(NodeId::null()); - session.set_user_identity(IdentityToken::None); - session.set_activated(false); - audit::log_close_session(&server_state, &session, address_space, true, request); - } - - { - let mut session_manager = trace_write_lock!(session_manager); - session_manager.deregister_session(session); - } - - CloseSessionResponse { - response_header: ResponseHeader::new_good(&request.request_header), - } - .into() - } else { - warn!( - "Close session invalid session for token {}", - &request.request_header.authentication_token - ); - self.service_fault(&request.request_header, StatusCode::BadSessionIdInvalid) - } - } - - pub fn cancel( - &self, - _server_state: Arc>, - _session: Arc>, - request: &CancelRequest, - ) -> SupportedMessage { - // This service call currently does nothing - CancelResponse { - response_header: ResponseHeader::new_good(&request.request_header), - cancel_count: 0, - } - .into() - } - - /// Verifies that the supplied client signature was produced by the session's client certificate - /// from the server's certificate and nonce. 
- fn verify_client_signature( - security_policy: SecurityPolicy, - server_state: &ServerState, - session: &Session, - client_signature: &SignatureData, - ) -> StatusCode { - if let Some(ref client_certificate) = session.client_certificate() { - if let Some(ref server_certificate) = server_state.server_certificate { - crypto::verify_signature_data( - client_signature, - security_policy, - client_certificate, - server_certificate, - session.session_nonce().as_ref(), - ) - } else { - error!("Client signature verification failed, server has no server certificate"); - StatusCode::BadUnexpectedError - } - } else { - error!("Client signature verification failed, session has no client certificate"); - StatusCode::BadUnexpectedError - } - } -} diff --git a/lib/src/server/services/subscription.rs b/lib/src/server/services/subscription.rs deleted file mode 100644 index fd63bf4f0..000000000 --- a/lib/src/server/services/subscription.rs +++ /dev/null @@ -1,321 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::sync::Arc; - -use crate::core::supported_message::SupportedMessage; -use crate::sync::*; -use crate::types::{status_code::StatusCode, *}; - -use crate::server::{ - address_space::AddressSpace, services::Service, session::Session, state::ServerState, - subscriptions::subscription::Subscription, -}; - -/// The subscription service. Allows the client to create, modify and delete subscriptions of monitored items -/// on the server and to request publish of notifications. 
-pub(crate) struct SubscriptionService; - -impl Service for SubscriptionService { - fn name(&self) -> String { - String::from("SubscriptionService") - } -} - -impl SubscriptionService { - pub fn new() -> SubscriptionService { - SubscriptionService {} - } - - /// Handles a CreateSubscriptionRequest - pub fn create_subscription( - &self, - server_state: Arc>, - session: Arc>, - request: &CreateSubscriptionRequest, - ) -> SupportedMessage { - let mut server_state = trace_write_lock!(server_state); - let mut session = trace_write_lock!(session); - - let subscriptions = session.subscriptions_mut(); - - if server_state.max_subscriptions > 0 - && subscriptions.len() >= server_state.max_subscriptions - { - self.service_fault(&request.request_header, StatusCode::BadTooManySubscriptions) - } else { - let subscription_id = server_state.create_subscription_id(); - - // Check the requested publishing interval and keep alive values - let (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) = - Self::revise_subscription_values( - &server_state, - request.requested_publishing_interval, - request.requested_max_keep_alive_count, - request.requested_lifetime_count, - ); - - // Create a new subscription - let publishing_enabled = request.publishing_enabled; - let subscription = Subscription::new( - server_state.diagnostics.clone(), - subscription_id, - publishing_enabled, - revised_publishing_interval, - revised_lifetime_count, - revised_max_keep_alive_count, - request.priority, - ); - subscriptions.insert(subscription_id, subscription); - - // Create the response - CreateSubscriptionResponse { - response_header: ResponseHeader::new_good(&request.request_header), - subscription_id, - revised_publishing_interval, - revised_lifetime_count, - revised_max_keep_alive_count, - } - .into() - } - } - - /// Handles a ModifySubscriptionRequest - pub fn modify_subscription( - &self, - server_state: Arc>, - session: Arc>, - request: &ModifySubscriptionRequest, - ) 
-> SupportedMessage { - let server_state = trace_write_lock!(server_state); - let mut session = trace_write_lock!(session); - - let subscriptions = session.subscriptions_mut(); - let subscription_id = request.subscription_id; - - if !subscriptions.contains(subscription_id) { - self.service_fault( - &request.request_header, - StatusCode::BadSubscriptionIdInvalid, - ) - } else { - let subscription = subscriptions.get_mut(subscription_id).unwrap(); - - let (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) = - SubscriptionService::revise_subscription_values( - &server_state, - request.requested_publishing_interval, - request.requested_max_keep_alive_count, - request.requested_lifetime_count, - ); - - subscription.set_publishing_interval(revised_publishing_interval); - subscription.set_max_keep_alive_count(revised_max_keep_alive_count); - subscription.set_max_lifetime_count(revised_lifetime_count); - subscription.set_priority(request.priority); - subscription.reset_lifetime_counter(); - subscription.reset_keep_alive_counter(); - // ...max_notifications_per_publish?? - - ModifySubscriptionResponse { - response_header: ResponseHeader::new_good(&request.request_header), - revised_publishing_interval, - revised_lifetime_count, - revised_max_keep_alive_count, - } - .into() - } - } - - /// Implementation of SetPublishingModeRequest service. 
See OPC Unified Architecture, Part 4 5.13.4 - pub fn set_publishing_mode( - &self, - session: Arc>, - request: &SetPublishingModeRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.subscription_ids) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut session = trace_write_lock!(session); - let subscription_ids = request.subscription_ids.as_ref().unwrap(); - let results = { - let publishing_enabled = request.publishing_enabled; - let mut results = Vec::with_capacity(subscription_ids.len()); - let subscriptions = session.subscriptions_mut(); - for subscription_id in subscription_ids { - if let Some(subscription) = subscriptions.get_mut(*subscription_id) { - subscription.set_publishing_enabled(publishing_enabled); - subscription.reset_lifetime_counter(); - results.push(StatusCode::Good); - } else { - results.push(StatusCode::BadSubscriptionIdInvalid); - } - } - Some(results) - }; - let diagnostic_infos = None; - SetPublishingModeResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos, - } - .into() - } - } - - /// Handles a TransferSubscriptionsRequest - pub fn transfer_subscriptions( - &self, - _session: Arc>, - request: &TransferSubscriptionsRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.subscription_ids) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let subscription_ids = request.subscription_ids.as_ref().unwrap(); - let results = { - // TODO this is a stub. The real thing should look up subscriptions belonging to - // other sessions and transfer them across to this one. 
- let results = subscription_ids - .iter() - .map(|_subscription_id| TransferResult { - status_code: StatusCode::BadSubscriptionIdInvalid, - available_sequence_numbers: None, - }) - .collect::>(); - Some(results) - }; - let diagnostic_infos = None; - TransferSubscriptionsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos, - } - .into() - } - } - - /// Handles a DeleteSubscriptionsRequest - pub fn delete_subscriptions( - &self, - session: Arc>, - request: &DeleteSubscriptionsRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.subscription_ids) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut session = trace_write_lock!(session); - let subscription_ids = request.subscription_ids.as_ref().unwrap(); - let results = { - let subscriptions = session.subscriptions_mut(); - // Attempt to remove each subscription - let results = subscription_ids - .iter() - .map(|subscription_id| { - let subscription = subscriptions.remove(*subscription_id); - if subscription.is_some() { - StatusCode::Good - } else { - StatusCode::BadSubscriptionIdInvalid - } - }) - .collect::>(); - Some(results) - }; - let diagnostic_infos = None; - DeleteSubscriptionsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos, - } - .into() - } - } - - /// Handles a PublishRequest. This is asynchronous, so the response will be sent later on. 
- pub fn async_publish( - &self, - now: &DateTimeUtc, - session: Arc>, - address_space: Arc>, - request_id: u32, - request: &PublishRequest, - ) -> Option { - trace!("--> Receive a PublishRequest {:?}", request); - let mut session = trace_write_lock!(session); - if session.subscriptions().is_empty() { - Some(self.service_fault(&request.request_header, StatusCode::BadNoSubscription)) - } else { - let address_space = trace_read_lock!(address_space); - let request_header = request.request_header.clone(); - let result = - session.enqueue_publish_request(now, request_id, request.clone(), &address_space); - if let Err(error) = result { - Some(self.service_fault(&request_header, error)) - } else { - None - } - } - } - - /// Handles a RepublishRequest - pub fn republish( - &self, - session: Arc>, - request: &RepublishRequest, - ) -> SupportedMessage { - trace!("Republish {:?}", request); - // Look for a matching notification message - let mut session = trace_write_lock!(session); - let result = session - .subscriptions() - .find_notification_message(request.subscription_id, request.retransmit_sequence_number); - if let Ok(notification_message) = result { - session.reset_subscription_lifetime_counter(request.subscription_id); - let response = RepublishResponse { - response_header: ResponseHeader::new_good(&request.request_header), - notification_message, - }; - response.into() - } else { - self.service_fault(&request.request_header, result.unwrap_err()) - } - } - - /// This function takes the requested values passed in a create / modify and returns revised - /// values that conform to the server's limits. 
For simplicity the return type is a tuple - fn revise_subscription_values( - server_state: &ServerState, - requested_publishing_interval: Duration, - requested_max_keep_alive_count: u32, - requested_lifetime_count: u32, - ) -> (Duration, u32, u32) { - let revised_publishing_interval = f64::max( - requested_publishing_interval, - server_state.min_publishing_interval_ms, - ); - let revised_max_keep_alive_count = - if requested_max_keep_alive_count > server_state.max_keep_alive_count { - server_state.max_keep_alive_count - } else if requested_max_keep_alive_count == 0 { - server_state.default_keep_alive_count - } else { - requested_max_keep_alive_count - }; - // Lifetime count must exceed keep alive count by at least a multiple of - let min_lifetime_count = revised_max_keep_alive_count * 3; - let revised_lifetime_count = if requested_lifetime_count < min_lifetime_count { - min_lifetime_count - } else if requested_lifetime_count > server_state.max_lifetime_count { - server_state.max_lifetime_count - } else { - requested_lifetime_count - }; - ( - revised_publishing_interval, - revised_max_keep_alive_count, - revised_lifetime_count, - ) - } -} diff --git a/lib/src/server/services/view.rs b/lib/src/server/services/view.rs deleted file mode 100644 index 1d47fd5ed..000000000 --- a/lib/src/server/services/view.rs +++ /dev/null @@ -1,555 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::result::Result; -use std::sync::Arc; - -use crate::core::supported_message::SupportedMessage; -use crate::crypto::random; -use crate::sync::*; -use crate::types::{node_ids::ReferenceTypeId, status_code::StatusCode, *}; - -use crate::server::{ - address_space::{relative_path, AddressSpace}, - continuation_point::BrowseContinuationPoint, - services::Service, - session::Session, - state::ServerState, -}; -/// The view service. Allows the client to browse the address space of the server. 
-pub(crate) struct ViewService; - -impl Service for ViewService { - fn name(&self) -> String { - String::from("ViewService") - } -} - -impl ViewService { - pub fn new() -> ViewService { - ViewService {} - } - - pub fn browse( - &self, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - request: &BrowseRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.nodes_to_browse) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let server_state = trace_read_lock!(server_state); - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - - let view = &request.view; - if !view.view_id.is_null() || !view.timestamp.is_null() { - // Views are not supported - info!("Browse request ignored because view was specified (views not supported)"); - self.service_fault(&request.request_header, StatusCode::BadViewIdUnknown) - } else { - // debug!("Browse request = {:#?}", request); - let nodes_to_browse = request.nodes_to_browse.as_ref().unwrap(); - if nodes_to_browse.len() <= server_state.operational_limits.max_nodes_per_browse { - // Max references per node. This should be server configurable but the constant - // is generous. 
TODO this value needs to adapt for the max message size - const DEFAULT_MAX_REFERENCES_PER_NODE: u32 = 255; - let max_references_per_node = if request.requested_max_references_per_node == 0 - { - // Client imposes no limit - DEFAULT_MAX_REFERENCES_PER_NODE - } else if request.requested_max_references_per_node - > DEFAULT_MAX_REFERENCES_PER_NODE - { - // Client limit exceeds default - DEFAULT_MAX_REFERENCES_PER_NODE - } else { - request.requested_max_references_per_node - }; - // Browse the nodes - let results = Some(Self::browse_nodes( - &mut session, - &address_space, - nodes_to_browse, - max_references_per_node as usize, - )); - let diagnostic_infos = None; - BrowseResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos, - } - .into() - } else { - error!( - "Browse request too many nodes to browse {}", - nodes_to_browse.len() - ); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } - } - } - - pub fn browse_next( - &self, - session: Arc>, - address_space: Arc>, - request: &BrowseNextRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.continuation_points) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - - let continuation_points = request.continuation_points.as_ref().unwrap(); - let results = if request.release_continuation_points { - session.remove_browse_continuation_points(continuation_points); - None - } else { - // Iterate from the continuation point, assuming it is valid - session.remove_expired_browse_continuation_points(&address_space); - let results = continuation_points - .iter() - .map(|continuation_point| { - Self::browse_from_continuation_point( - &mut session, - &address_space, - continuation_point, - ) - }) - .collect(); - Some(results) - }; - - let diagnostic_infos = None; - BrowseNextResponse { - 
response_header: ResponseHeader::new_good(&request.request_header), - results, - diagnostic_infos, - } - .into() - } - } - - pub fn translate_browse_paths_to_node_ids( - &self, - server_state: Arc>, - address_space: Arc>, - request: &TranslateBrowsePathsToNodeIdsRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.browse_paths) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let server_state = trace_read_lock!(server_state); - let address_space = trace_read_lock!(address_space); - let browse_paths = request.browse_paths.as_ref().unwrap(); - let max_browse_paths_per_translate = server_state - .operational_limits - .max_nodes_per_translate_browse_paths_to_node_ids; - if browse_paths.len() <= max_browse_paths_per_translate { - let results = browse_paths - .iter() - .enumerate() - .map(|(i, browse_path)| { - trace!("Processing browse path {}", i); - let node_id = browse_path.starting_node.clone(); - if browse_path.relative_path.elements.is_none() { - BrowsePathResult { - status_code: StatusCode::BadNothingToDo, - targets: None, - } - } else { - // Starting from the node_id, find paths - match relative_path::find_nodes_relative_path( - &address_space, - &node_id, - &browse_path.relative_path, - ) { - Err(err) => { - trace!( - "Browse path result for find nodes returned in error {}", - err.sub_code().name() - ); - BrowsePathResult { - status_code: err, - targets: None, - } - } - Ok(result) => { - let targets = if !result.is_empty() { - let targets = result - .iter() - .map(|node_id| BrowsePathTarget { - target_id: ExpandedNodeId::new(node_id.clone()), - remaining_path_index: u32::MAX, - }) - .collect(); - Some(targets) - } else { - None - }; - BrowsePathResult { - status_code: StatusCode::Good, - targets, - } - } - } - } - }) - .collect(); - TranslateBrowsePathsToNodeIdsResponse { - response_header: ResponseHeader::new_good(&request.request_header), - results: Some(results), - diagnostic_infos: None, - } - .into() 
- } else { - error!( - "Browse paths size {} exceeds max nodes {}", - browse_paths.len(), - max_browse_paths_per_translate - ); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } - } - - pub fn register_nodes( - &self, - server_state: Arc>, - session: Arc>, - request: &RegisterNodesRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.nodes_to_register) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut server_state = trace_write_lock!(server_state); - let nodes_to_register = request.nodes_to_register.as_ref().unwrap(); - if nodes_to_register.len() - <= server_state.operational_limits.max_nodes_per_register_nodes - { - if let Some(ref mut callback) = server_state.register_nodes_callback { - match callback.register_nodes(session, &nodes_to_register[..]) { - Ok(registered_node_ids) => RegisterNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - registered_node_ids: Some(registered_node_ids), - } - .into(), - Err(err) => self.service_fault(&request.request_header, err), - } - } else { - // There is no callback for registering nodes, so just pretend they're registered. 
- let registered_node_ids = nodes_to_register.to_vec(); - RegisterNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - registered_node_ids: Some(registered_node_ids), - } - .into() - } - } else { - error!( - "Register nodes too many operations {}", - nodes_to_register.len() - ); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } - } - - pub fn unregister_nodes( - &self, - server_state: Arc>, - session: Arc>, - request: &UnregisterNodesRequest, - ) -> SupportedMessage { - if is_empty_option_vec!(request.nodes_to_unregister) { - self.service_fault(&request.request_header, StatusCode::BadNothingToDo) - } else { - let mut server_state = trace_write_lock!(server_state); - let nodes_to_unregister = request.nodes_to_unregister.as_ref().unwrap(); - if nodes_to_unregister.len() - <= server_state.operational_limits.max_nodes_per_register_nodes - { - if let Some(ref mut callback) = server_state.unregister_nodes_callback { - match callback.unregister_nodes(session, &nodes_to_unregister[..]) { - Ok(_) => UnregisterNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - } - .into(), - Err(err) => self.service_fault(&request.request_header, err), - } - } else { - // There is no callback so just succeed - UnregisterNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - } - .into() - } - } else { - error!( - "Unregister nodes too many operations {}", - nodes_to_unregister.len() - ); - self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) - } - } - } - - fn browse_nodes( - session: &mut Session, - address_space: &AddressSpace, - nodes_to_browse: &[BrowseDescription], - max_references_per_node: usize, - ) -> Vec { - nodes_to_browse - .iter() - .map(|node_to_browse| { - match Self::browse_node( - session, - address_space, - 0, - node_to_browse, - max_references_per_node, - ) { - Ok(browse_result) => browse_result, - 
Err(status_code) => BrowseResult { - status_code, - continuation_point: ByteString::null(), - references: None, - }, - } - }) - .collect() - } - - fn browse_node( - session: &mut Session, - address_space: &AddressSpace, - starting_index: usize, - node_to_browse: &BrowseDescription, - max_references_per_node: usize, - ) -> Result { - // Node must exist or there will be no references - if node_to_browse.node_id.is_null() || !address_space.node_exists(&node_to_browse.node_id) { - return Err(StatusCode::BadNodeIdUnknown); - } - - //debug!("Node to browse = {:?}", node_to_browse); - - // Request may wish to filter by a kind of reference - let reference_type_id = if node_to_browse.reference_type_id.is_null() { - None - } else if let Ok(reference_type_id) = - node_to_browse.reference_type_id.as_reference_type_id() - { - Some((reference_type_id, node_to_browse.include_subtypes)) - } else { - None - }; - - // Fetch the references to / from the given node to browse - - let (references, inverse_ref_idx) = address_space.find_references_by_direction( - &node_to_browse.node_id, - node_to_browse.browse_direction, - reference_type_id, - ); - - let result_mask = - BrowseDescriptionResultMask::from_bits_truncate(node_to_browse.result_mask); - let node_class_mask = NodeClassMask::from_bits_truncate(node_to_browse.node_class_mask); - - // Construct descriptions for each reference - let mut reference_descriptions: Vec = - Vec::with_capacity(max_references_per_node); - for (idx, reference) in references.iter().enumerate() { - if idx < starting_index { - continue; - } - let target_node_id = reference.target_node.clone(); - if target_node_id.is_null() { - continue; - } - let target_node = address_space.find_node(&target_node_id); - if target_node.is_none() { - continue; - } - - let target_node = target_node.unwrap().as_node(); - let target_node_class = target_node.node_class(); - - // Skip target nodes not required by the mask - if target_node_class != NodeClass::Unspecified && 
!node_class_mask.is_empty() { - let target_node_class = NodeClassMask::from_bits_truncate(target_node_class as u32); - if !node_class_mask.contains(target_node_class) { - continue; - } - } - - // Prepare the values to put into the struct according to the result mask - let reference_type_id = - if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_REFERENCE_TYPE) { - reference.reference_type.clone() - } else { - NodeId::null() - }; - let is_forward = - if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_IS_FORWARD) { - idx < inverse_ref_idx - } else { - true - }; - - let target_node_class = - if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_NODE_CLASS) { - target_node_class - } else { - NodeClass::Unspecified - }; - let browse_name = - if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_BROWSE_NAME) { - target_node.browse_name().clone() - } else { - QualifiedName::null() - }; - let display_name = - if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_DISPLAY_NAME) { - target_node.display_name().clone() - } else { - LocalizedText::null() - }; - let type_definition = - if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_TYPE_DEFINITION) { - // Type definition NodeId of the TargetNode. Type definitions are only available - // for the NodeClasses Object and Variable. For all other NodeClasses a null NodeId - // shall be returned. 
- match target_node_class { - NodeClass::Object | NodeClass::Variable => { - let type_defs = address_space.find_references( - &target_node.node_id(), - Some((ReferenceTypeId::HasTypeDefinition, false)), - ); - if let Some(type_defs) = type_defs { - ExpandedNodeId::new(type_defs[0].target_node.clone()) - } else { - ExpandedNodeId::null() - } - } - _ => ExpandedNodeId::null(), - } - } else { - ExpandedNodeId::null() - }; - - let reference_description = ReferenceDescription { - node_id: ExpandedNodeId::new(target_node_id), - reference_type_id, - is_forward, - node_class: target_node_class, - browse_name, - display_name, - type_definition, - }; - reference_descriptions.push(reference_description); - } - - Ok(Self::reference_description_to_browse_result( - session, - address_space, - &reference_descriptions, - 0, - max_references_per_node, - )) - } - - fn browse_from_continuation_point( - session: &mut Session, - address_space: &AddressSpace, - continuation_point: &ByteString, - ) -> BrowseResult { - // Find the continuation point in the session - if let Some(continuation_point) = session.find_browse_continuation_point(continuation_point) - { - debug!( - "Browsing from continuation point {}", - continuation_point.id.as_base64() - ); - let reference_descriptions = continuation_point.reference_descriptions.lock(); - // Use the existing result. 
This may result in another continuation point being created - Self::reference_description_to_browse_result( - session, - address_space, - &reference_descriptions, - continuation_point.starting_index, - continuation_point.max_references_per_node, - ) - } else { - // Not valid or missing - error!( - "Continuation point {} was invalid", - continuation_point.as_base64() - ); - BrowseResult { - status_code: StatusCode::BadContinuationPointInvalid, - continuation_point: ByteString::null(), - references: None, - } - } - } - - fn reference_description_to_browse_result( - session: &mut Session, - address_space: &AddressSpace, - reference_descriptions: &[ReferenceDescription], - starting_index: usize, - max_references_per_node: usize, - ) -> BrowseResult { - let references_remaining = reference_descriptions.len() - starting_index; - let (reference_descriptions, continuation_point) = if max_references_per_node > 0 - && references_remaining > max_references_per_node - { - // There is too many results for a single browse result, so only a result will be used - let next_starting_index = starting_index + max_references_per_node; - let reference_descriptions_slice = - reference_descriptions[starting_index..next_starting_index].to_vec(); - - // TODO it is wasteful to create a new reference_descriptions vec if the caller to this fn - // already has a ref counted reference_descriptions. We could clone the Arc if the fn could - // be factored to allow for that - - // Create a continuation point for the remainder of the result. 
The point will hold the entire result - let continuation_point = random::byte_string(6); - - debug!("References remaining {} exceeds max references {}, returning range {}..{} and creating new continuation point {}", references_remaining, max_references_per_node, starting_index, next_starting_index, continuation_point.as_base64()); - - session.add_browse_continuation_point(BrowseContinuationPoint { - id: continuation_point.clone(), - address_space_last_modified: address_space.last_modified(), - max_references_per_node, - starting_index: next_starting_index, - reference_descriptions: Arc::new(Mutex::new(reference_descriptions.to_vec())), - }); - - (reference_descriptions_slice, continuation_point) - } else { - // Returns the remainder of the results - let reference_descriptions_slice = reference_descriptions[starting_index..].to_vec(); - debug!( - "Returning references {}..{}, with no further continuation point", - starting_index, - reference_descriptions.len() - ); - (reference_descriptions_slice, ByteString::null()) - }; - BrowseResult { - status_code: StatusCode::Good, - continuation_point, - references: Some(reference_descriptions), - } - } -} diff --git a/lib/src/server/session.rs b/lib/src/server/session.rs deleted file mode 100644 index 735328182..000000000 --- a/lib/src/server/session.rs +++ /dev/null @@ -1,561 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock -use std::{ - collections::{HashMap, HashSet, VecDeque}, - sync::{ - atomic::{AtomicI32, Ordering}, - Arc, - }, -}; - -use chrono::Utc; - -use crate::crypto::X509; -use crate::sync::*; -use crate::types::{service_types::PublishRequest, status_code::StatusCode, *}; - -use crate::server::{ - address_space::{AddressSpace, UserAccessLevel}, - continuation_point::BrowseContinuationPoint, - diagnostics::ServerDiagnostics, - identity_token::IdentityToken, - session_diagnostics::SessionDiagnostics, - state::ServerState, - 
subscriptions::subscription::TickReason, - subscriptions::subscriptions::Subscriptions, -}; - -/// Session info holds information about a session created by CreateSession service -#[derive(Clone)] -pub struct SessionInfo {} - -const PUBLISH_REQUEST_TIMEOUT: i64 = 30000; - -lazy_static! { - static ref NEXT_SESSION_ID: AtomicI32 = AtomicI32::new(1); -} - -fn next_session_id() -> NodeId { - // Session id will be a string identifier - let session_id = NEXT_SESSION_ID.fetch_add(1, Ordering::Relaxed); - let session_id = format!("Session-{}", session_id); - NodeId::new(1, session_id) -} - -pub enum ServerUserIdentityToken { - Empty, - AnonymousIdentityToken, - UserNameIdentityToken(UserIdentityToken), - X509IdentityToken(X509IdentityToken), - Invalid(ExtensionObject), -} - -#[derive(Default)] -pub struct SessionManager { - pub sessions: HashMap>>, - pub sessions_terminated: bool, -} - -impl SessionManager { - pub fn len(&self) -> usize { - self.sessions.len() - } - - pub fn first(&self) -> Option>> { - self.sessions.iter().next().map(|(_, s)| s.clone()) - } - - pub fn sessions_terminated(&self) -> bool { - self.sessions_terminated - } - - /// Puts all sessions into a terminated state, deregisters them, and clears the map - pub fn clear(&mut self, address_space: Arc>) { - for (_nodeid, session) in self.sessions.drain() { - let mut session = trace_write_lock!(session); - session.set_terminated(); - let mut space = trace_write_lock!(address_space); - let diagnostics = trace_write_lock!(session.session_diagnostics); - diagnostics.deregister_session(&session, &mut space); - } - } - - /// Find a session by its session id and return it. - pub fn find_session_by_id(&self, session_id: &NodeId) -> Option>> { - self.sessions - .iter() - .find(|s| { - let session = trace_read_lock!(s.1); - session.session_id() == session_id - }) - .map(|s| s.1) - .cloned() - } - - /// Finds the session by its authentication token and returns it. 
The authentication token - /// can be renewed so it is not used as a key. - pub fn find_session_by_token( - &self, - authentication_token: &NodeId, - ) -> Option>> { - self.sessions - .iter() - .find(|s| { - let session = trace_read_lock!(s.1); - session.authentication_token() == authentication_token - }) - .map(|s| s.1) - .cloned() - } - - /// Register the session in the map so it can be searched on - pub fn register_session(&mut self, session: Arc>) { - let session_id = { - let session = trace_read_lock!(session); - session.session_id().clone() - }; - self.sessions.insert(session_id, session); - } - - /// Deregisters a session from the map - pub fn deregister_session( - &mut self, - session: Arc>, - ) -> Option>> { - let session = trace_read_lock!(session); - let session_id = session.session_id(); - debug!( - "deregister_session with session id {}, auth token {}", - session_id, - session.authentication_token() - ); - let result = self.sessions.remove(session_id); - debug!( - "deregister_session, new session count = {}", - self.sessions.len() - ); - self.sessions_terminated = self.sessions.is_empty(); - result - } -} - -/// The Session is any state maintained between the client and server -pub struct Session { - /// The session identifier - session_id: NodeId, - /// Security policy - security_policy_uri: String, - /// Secure channel id - secure_channel_id: u32, - /// Client's certificate - client_certificate: Option, - /// Authentication token for the session - authentication_token: NodeId, - /// Session nonce - session_nonce: ByteString, - /// Session name (supplied by client) - session_name: UAString, - /// Session timeout - session_timeout: f64, - /// User identity token - user_identity: IdentityToken, - /// Session's preferred locale ids - locale_ids: Option>, - /// Negotiated max request message size - max_request_message_size: u32, - /// Negotiated max response message size - max_response_message_size: u32, - /// Endpoint url for this session - endpoint_url: 
UAString, - /// Maximum number of continuation points - max_browse_continuation_points: usize, - /// Browse continuation points (oldest to newest) - browse_continuation_points: VecDeque, - /// Diagnostics associated with the server - diagnostics: Arc>, - /// Diagnostics associated with the session - session_diagnostics: Arc>, - /// Indicates if the session has received an ActivateSession - activated: bool, - /// Flag to indicate session should be terminated - terminate_session: bool, - /// Time that session was terminated, helps with recovering sessions, or clearing them out - terminated_at: DateTimeUtc, - /// Flag indicating session is actually terminated - terminated: bool, - /// Flag indicating broadly if this session may modify the address space by adding or removing - /// nodes or references to nodes. - can_modify_address_space: bool, - /// Timestamp of the last service request to have happened (only counts service requests while there is a session) - last_service_request_timestamp: DateTimeUtc, - /// Subscriptions associated with the session - subscriptions: Subscriptions, -} - -impl Drop for Session { - fn drop(&mut self) { - info!("Session is being dropped"); - let mut diagnostics = trace_write_lock!(self.diagnostics); - diagnostics.on_destroy_session(self); - } -} - -impl Session { - #[cfg(test)] - pub fn new_no_certificate_store() -> Session { - let max_browse_continuation_points = super::constants::MAX_BROWSE_CONTINUATION_POINTS; - let session = Session { - subscriptions: Subscriptions::new(100, PUBLISH_REQUEST_TIMEOUT), - session_id: next_session_id(), - secure_channel_id: 0, - activated: false, - terminate_session: false, - terminated: false, - terminated_at: chrono::Utc::now(), - client_certificate: None, - security_policy_uri: String::new(), - authentication_token: NodeId::null(), - session_nonce: ByteString::null(), - session_name: UAString::null(), - session_timeout: 0f64, - user_identity: IdentityToken::None, - locale_ids: None, - 
max_request_message_size: 0, - max_response_message_size: 0, - endpoint_url: UAString::null(), - max_browse_continuation_points, - browse_continuation_points: VecDeque::with_capacity(max_browse_continuation_points), - can_modify_address_space: true, - diagnostics: Arc::new(RwLock::new(ServerDiagnostics::default())), - session_diagnostics: Arc::new(RwLock::new(SessionDiagnostics::default())), - last_service_request_timestamp: Utc::now(), - }; - - { - let mut diagnostics = trace_write_lock!(session.diagnostics); - diagnostics.on_create_session(&session); - } - session - } - - /// Create a `Session` from a `Server` - pub fn new(server_state: Arc>) -> Session { - let max_browse_continuation_points = super::constants::MAX_BROWSE_CONTINUATION_POINTS; - - let server_state = trace_read_lock!(server_state); - let max_subscriptions = server_state.max_subscriptions; - let diagnostics = server_state.diagnostics.clone(); - let can_modify_address_space = { - let config = trace_read_lock!(server_state.config); - config.limits.clients_can_modify_address_space - }; - - let session = Session { - subscriptions: Subscriptions::new(max_subscriptions, PUBLISH_REQUEST_TIMEOUT), - session_id: next_session_id(), - secure_channel_id: 0, - activated: false, - terminate_session: false, - terminated: false, - terminated_at: chrono::Utc::now(), - client_certificate: None, - security_policy_uri: String::new(), - authentication_token: NodeId::null(), - session_nonce: ByteString::null(), - session_name: UAString::null(), - session_timeout: 0f64, - user_identity: IdentityToken::None, - locale_ids: None, - max_request_message_size: 0, - max_response_message_size: 0, - endpoint_url: UAString::null(), - max_browse_continuation_points, - browse_continuation_points: VecDeque::with_capacity(max_browse_continuation_points), - can_modify_address_space, - diagnostics, - session_diagnostics: Arc::new(RwLock::new(SessionDiagnostics::default())), - last_service_request_timestamp: Utc::now(), - }; - { - let mut 
diagnostics = trace_write_lock!(session.diagnostics); - diagnostics.on_create_session(&session); - } - session - } - - pub fn session_id(&self) -> &NodeId { - &self.session_id - } - - pub fn set_activated(&mut self, activated: bool) { - self.activated = activated; - } - - pub fn is_activated(&self) -> bool { - self.activated - } - - pub fn is_terminated(&self) -> bool { - self.terminated - } - - pub fn terminated_at(&self) -> DateTimeUtc { - self.terminated_at - } - - pub fn set_terminated(&mut self) { - info!("Session being set to terminated"); - self.terminated = true; - self.terminated_at = chrono::Utc::now(); - } - - pub fn secure_channel_id(&self) -> u32 { - self.secure_channel_id - } - - pub fn set_secure_channel_id(&mut self, secure_channel_id: u32) { - self.secure_channel_id = secure_channel_id; - } - - pub fn authentication_token(&self) -> &NodeId { - &self.authentication_token - } - - pub fn set_authentication_token(&mut self, authentication_token: NodeId) { - self.authentication_token = authentication_token; - } - - pub fn session_timeout(&self) -> f64 { - self.session_timeout - } - - pub fn set_session_timeout(&mut self, session_timeout: f64) { - self.session_timeout = session_timeout; - } - - pub fn set_max_request_message_size(&mut self, max_request_message_size: u32) { - self.max_request_message_size = max_request_message_size; - } - - pub fn set_max_response_message_size(&mut self, max_response_message_size: u32) { - self.max_response_message_size = max_response_message_size; - } - - pub fn endpoint_url(&self) -> &UAString { - &self.endpoint_url - } - - pub fn set_endpoint_url(&mut self, endpoint_url: UAString) { - self.endpoint_url = endpoint_url; - } - - pub fn set_security_policy_uri(&mut self, security_policy_uri: &str) { - self.security_policy_uri = security_policy_uri.to_string(); - } - - pub fn set_user_identity(&mut self, user_identity: IdentityToken) { - self.user_identity = user_identity; - } - - pub fn 
last_service_request_timestamp(&self) -> DateTimeUtc { - self.last_service_request_timestamp - } - - pub fn set_last_service_request_timestamp( - &mut self, - last_service_request_timestamp: DateTimeUtc, - ) { - self.last_service_request_timestamp = last_service_request_timestamp; - } - - pub fn locale_ids(&self) -> &Option> { - &self.locale_ids - } - - pub fn set_locale_ids(&mut self, locale_ids: Option>) { - self.locale_ids = locale_ids; - } - - pub fn client_certificate(&self) -> &Option { - &self.client_certificate - } - - pub fn set_client_certificate(&mut self, client_certificate: Option) { - self.client_certificate = client_certificate; - } - - pub fn session_nonce(&self) -> &ByteString { - &self.session_nonce - } - - pub fn set_session_nonce(&mut self, session_nonce: ByteString) { - self.session_nonce = session_nonce; - } - - pub fn session_name(&self) -> &UAString { - &self.session_name - } - - pub fn set_session_name(&mut self, session_name: UAString) { - self.session_name = session_name; - } - - pub(crate) fn session_diagnostics(&self) -> Arc> { - self.session_diagnostics.clone() - } - - pub(crate) fn subscriptions(&self) -> &Subscriptions { - &self.subscriptions - } - - pub(crate) fn subscriptions_mut(&mut self) -> &mut Subscriptions { - &mut self.subscriptions - } - - pub(crate) fn enqueue_publish_request( - &mut self, - now: &DateTimeUtc, - request_id: u32, - request: PublishRequest, - address_space: &AddressSpace, - ) -> Result<(), StatusCode> { - self.subscriptions - .enqueue_publish_request(now, request_id, request, address_space) - } - - pub(crate) fn tick_subscriptions( - &mut self, - now: &DateTimeUtc, - address_space: &AddressSpace, - reason: TickReason, - ) -> Result<(), StatusCode> { - self.subscriptions.tick(now, address_space, reason) - } - - /// Reset the lifetime counter on the subscription, e.g. because a service references the - /// subscription. 
- pub(crate) fn reset_subscription_lifetime_counter(&mut self, subscription_id: u32) { - if let Some(subscription) = self.subscriptions.get_mut(subscription_id) { - subscription.reset_lifetime_counter(); - } - } - - /// Iterates through the existing queued publish requests and creates a timeout - /// publish response any that have expired. - pub(crate) fn expire_stale_publish_requests(&mut self, now: &DateTimeUtc) { - self.subscriptions.expire_stale_publish_requests(now); - } - - pub(crate) fn add_browse_continuation_point( - &mut self, - continuation_point: BrowseContinuationPoint, - ) { - // Remove excess browse continuation points - while self.browse_continuation_points.len() >= self.max_browse_continuation_points { - let continuation_point = self.browse_continuation_points.pop_front(); - debug!( - "Removing old continuation point {} to make way for new one", - continuation_point.unwrap().id.as_base64() - ); - } - self.browse_continuation_points - .push_back(continuation_point); - } - - /// Finds and REMOVES a continuation point by id. 
- pub(crate) fn find_browse_continuation_point( - &mut self, - id: &ByteString, - ) -> Option { - if let Some(idx) = self - .browse_continuation_points - .iter() - .position(|continuation_point| continuation_point.id == *id) - { - self.browse_continuation_points.remove(idx) - } else { - None - } - } - - pub(crate) fn remove_expired_browse_continuation_points( - &mut self, - address_space: &AddressSpace, - ) { - self.browse_continuation_points.retain(|continuation_point| { - let valid = continuation_point.is_valid_browse_continuation_point(address_space); - if !valid { - debug!("Continuation point {:?} is no longer valid and will be removed, address space last modified = {}", continuation_point, address_space.last_modified()); - } - valid - }); - } - - /// Remove all the specified continuation points by id - pub(crate) fn remove_browse_continuation_points(&mut self, continuation_points: &[ByteString]) { - // Turn the supplied slice into a set - let continuation_points_set: HashSet = - continuation_points.iter().cloned().collect(); - // Now remove any continuation points that are part of that set - self.browse_continuation_points - .retain(|continuation_point| !continuation_points_set.contains(&continuation_point.id)); - } - - pub(crate) fn can_modify_address_space(&self) -> bool { - self.can_modify_address_space - } - - #[cfg(test)] - pub(crate) fn set_can_modify_address_space(&mut self, can_modify_address_space: bool) { - self.can_modify_address_space = can_modify_address_space; - } - - pub(crate) fn effective_user_access_level( - &self, - user_access_level: UserAccessLevel, - _node_id: &NodeId, - _attribute_id: AttributeId, - ) -> UserAccessLevel { - // TODO session could modify the user_access_level further here via user / groups - user_access_level - } - - /// Helper function to return the client user id from the identity token or None of there is no user id - /// - /// This conforms to OPC Part 5 6.4.3 ClientUserId - pub fn client_user_id(&self) -> UAString { - 
match self.user_identity { - IdentityToken::None | IdentityToken::AnonymousIdentityToken(_) => UAString::null(), - IdentityToken::UserNameIdentityToken(ref token) => token.user_name.clone(), - IdentityToken::X509IdentityToken(ref token) => { - if let Ok(cert) = X509::from_byte_string(&token.certificate_data) { - UAString::from(cert.subject_name()) - } else { - UAString::from("Invalid certificate") - } - } - IdentityToken::Invalid(_) => UAString::from("invalid"), - } - } - - pub fn is_session_terminated(&self) -> bool { - self.terminate_session - } - - pub fn terminate_session(&mut self) { - self.terminate_session = true; - } - - pub(crate) fn register_session(&self, address_space: Arc>) { - let session_diagnostics = trace_read_lock!(self.session_diagnostics); - let mut address_space = trace_write_lock!(address_space); - session_diagnostics.register_session(self, &mut address_space); - } - - pub(crate) fn deregister_session(&self, address_space: Arc>) { - let session_diagnostics = trace_read_lock!(self.session_diagnostics); - let mut address_space = trace_write_lock!(address_space); - session_diagnostics.deregister_session(self, &mut address_space); - } -} diff --git a/lib/src/server/session/continuation_points.rs b/lib/src/server/session/continuation_points.rs new file mode 100644 index 000000000..5a7bcbf0c --- /dev/null +++ b/lib/src/server/session/continuation_points.rs @@ -0,0 +1,38 @@ +use std::any::Any; + +/// Representation of a dynamic continuation point. +/// Each node manager may provide their own continuation point type, +/// which is stored by the server. This wraps that value and provides interfaces +/// to access it for a given node manager. +pub struct ContinuationPoint { + payload: Box, +} + +impl ContinuationPoint { + pub fn new(item: Box) -> Self { + Self { payload: item } + } + + /// Retrieve the value of the continuation point. + /// This will return `None` if the stored value is not equal to the + /// given type. 
Most node managers should report an error if this happens. + pub fn get(&self) -> Option<&T> { + self.payload.downcast_ref() + } + + /// Retrieve the value of the continuation point. + /// This will return `None` if the stored value is not equal to the + /// given type. Most node managers should report an error if this happens. + pub fn get_mut(&mut self) -> Option<&mut T> { + self.payload.downcast_mut() + } + + /// Consume this continuation point and return a specific type. + pub fn take(self) -> Option> { + self.payload.downcast().ok() + } +} + +/// Continuation point implementation used when continuation is necessary, but +/// the last called node manager is empty. +pub(crate) struct EmptyContinuationPoint; diff --git a/lib/src/server/session/controller.rs b/lib/src/server/session/controller.rs new file mode 100644 index 000000000..08cc7ad41 --- /dev/null +++ b/lib/src/server/session/controller.rs @@ -0,0 +1,592 @@ +use std::{ + pin::Pin, + sync::Arc, + time::{Duration, Instant}, +}; + +use futures::{future::Either, stream::FuturesUnordered, Future, StreamExt}; +use tokio::net::TcpStream; + +use crate::{ + core::{ + comms::{ + secure_channel::SecureChannel, security_header::SecurityHeader, tcp_types::ErrorMessage, + }, + config::Config, + handle::AtomicHandle, + supported_message::SupportedMessage, + }, + crypto::{CertificateStore, SecurityPolicy}, + server::{ + authenticator::UserToken, + info::ServerInfo, + node_manager::NodeManagers, + subscriptions::SubscriptionCache, + transport::tcp::{Request, TcpTransport, TransportConfig, TransportPollResult}, + }, + sync::RwLock, + types::{ + ChannelSecurityToken, DateTime, FindServersResponse, GetEndpointsResponse, + MessageSecurityMode, OpenSecureChannelRequest, OpenSecureChannelResponse, ResponseHeader, + SecurityTokenRequestType, ServiceFault, StatusCode, + }, +}; + +use super::{instance::Session, manager::SessionManager, message_handler::MessageHandler}; + +pub(crate) struct Response { + pub message: 
SupportedMessage, + pub request_id: u32, +} + +impl Response { + pub fn from_result( + result: Result, StatusCode>, + request_handle: u32, + request_id: u32, + ) -> Self { + match result { + Ok(r) => Self { + message: r.into(), + request_id, + }, + Err(e) => Self { + message: ServiceFault::new(request_handle, e).into(), + request_id, + }, + } + } +} + +pub(crate) enum ControllerCommand { + Close, +} + +/// Master type managing a single connection. +pub(crate) struct SessionController { + channel: SecureChannel, + transport: TcpTransport, + secure_channel_state: SecureChannelState, + session_manager: Arc>, + certificate_store: Arc>, + message_handler: MessageHandler, + pending_messages: FuturesUnordered< + Pin> + Send + Sync + 'static>>, + >, + info: Arc, + deadline: Instant, +} + +enum RequestProcessResult { + Ok, + Close, +} + +impl SessionController { + pub fn new( + socket: TcpStream, + session_manager: Arc>, + certificate_store: Arc>, + info: Arc, + node_managers: NodeManagers, + subscriptions: Arc, + ) -> Self { + let channel = SecureChannel::new( + certificate_store.clone(), + crate::core::comms::secure_channel::Role::Server, + info.decoding_options(), + ); + let transport = TcpTransport::new( + socket, + TransportConfig { + send_buffer_size: info.config.limits.send_buffer_size, + max_message_size: info.config.limits.max_message_size, + max_chunk_count: info.config.limits.max_chunk_count, + hello_timeout: Duration::from_secs(info.config.tcp_config.hello_timeout as u64), + }, + info.decoding_options(), + info.clone(), + ); + + Self { + channel, + transport, + secure_channel_state: SecureChannelState::new(info.secure_channel_id_handle.clone()), + session_manager, + certificate_store, + message_handler: MessageHandler::new(info.clone(), node_managers, subscriptions), + deadline: Instant::now() + + Duration::from_secs(info.config.tcp_config.hello_timeout as u64), + info, + pending_messages: FuturesUnordered::new(), + } + } + + pub async fn run(mut self, mut 
command: tokio::sync::mpsc::Receiver) { + loop { + let resp_fut = if self.pending_messages.is_empty() { + Either::Left(futures::future::pending::>>()) + } else { + Either::Right(self.pending_messages.next()) + }; + + tokio::select! { + _ = tokio::time::sleep_until(self.deadline.into()) => { + if !self.transport.is_closing() { + warn!("Connection timed out, closing"); + self.transport.enqueue_error(ErrorMessage::new(StatusCode::BadTimeout, "Connection timeout")); + } + self.transport.set_closing(); + } + cmd = command.recv() => { + match cmd { + Some(ControllerCommand::Close) | None => { + if !self.transport.is_closing() { + self.transport.enqueue_error(ErrorMessage::new(StatusCode::BadServerHalted, "Server stopped")); + } + self.transport.set_closing(); + } + } + } + msg = resp_fut => { + let msg = match msg { + Some(Ok(x)) => x, + Some(Err(e)) => { + error!("Unexpected error in message handler: {e}"); + self.transport.set_closing(); + continue; + } + // Cannot happen, pending_messages is non-empty or this future never returns. 
+ None => unreachable!(), + }; + if let Err(e) = self.transport.enqueue_message_for_send( + &mut self.channel, + msg.message, + msg.request_id + ) { + error!("Failed to send response: {e}"); + self.transport.set_closing(); + } + } + res = self.transport.poll(&mut self.channel) => { + trace!("Transport poll result: {res:?}"); + match res { + TransportPollResult::IncomingMessage(req) => { + if matches!(self.process_request(req).await, RequestProcessResult::Close) { + self.transport.set_closing(); + } + } + TransportPollResult::Error(s) => { + error!("Fatal transport error: {s}"); + if !self.transport.is_closing() { + self.transport.enqueue_error(ErrorMessage::new(s, "Transport error")); + } + self.transport.set_closing(); + } + TransportPollResult::Closed => break, + _ => (), + } + } + } + } + } + + async fn process_request(&mut self, req: Request) -> RequestProcessResult { + let id = req.request_id; + match req.message { + SupportedMessage::OpenSecureChannelRequest(r) => { + let res = self.open_secure_channel( + &req.chunk_info.security_header, + self.transport.client_protocol_version, + &r, + ); + if res.is_ok() { + self.deadline = self.channel.token_renewal_deadline(); + } + match res { + Ok(r) => match self + .transport + .enqueue_message_for_send(&mut self.channel, r, id) + { + Ok(_) => RequestProcessResult::Ok, + Err(e) => { + error!("Failed to send open secure channel response: {e}"); + return RequestProcessResult::Close; + } + }, + Err(e) => { + let _ = self.transport.enqueue_message_for_send( + &mut self.channel, + ServiceFault::new(&r.request_header, e).into(), + id, + ); + return RequestProcessResult::Close; + } + } + } + + SupportedMessage::CloseSecureChannelRequest(_r) => { + return RequestProcessResult::Close; + } + + SupportedMessage::CreateSessionRequest(request) => { + let mut mgr = trace_write_lock!(self.session_manager); + let res = mgr.create_session(&mut self.channel, &self.certificate_store, &request); + drop(mgr); + 
self.process_service_result(res, request.request_header.request_handle, id) + } + + SupportedMessage::ActivateSessionRequest(request) => { + let mut mgr = trace_write_lock!(self.session_manager); + let res = mgr.activate_session(&mut self.channel, &request).await; + drop(mgr); + self.process_service_result(res, request.request_header.request_handle, id) + } + + SupportedMessage::CloseSessionRequest(request) => { + let mut mgr = trace_write_lock!(self.session_manager); + let res = mgr + .close_session(&mut self.channel, &mut self.message_handler, &request) + .await; + drop(mgr); + self.process_service_result(res, request.request_header.request_handle, id) + } + SupportedMessage::GetEndpointsRequest(request) => { + // TODO some of the arguments in the request are ignored + // localeIds - list of locales to use for human readable strings (in the endpoint descriptions) + + // TODO audit - generate event for failed service invocation + + let endpoints = self + .info + .endpoints(&request.endpoint_url, &request.profile_uris); + self.process_service_result( + Ok(GetEndpointsResponse { + response_header: ResponseHeader::new_good(&request.request_header), + endpoints, + }), + request.request_header.request_handle, + id, + ) + } + SupportedMessage::FindServersRequest(request) => { + let desc = self.info.config.application_description(); + let mut servers = vec![desc]; + + // TODO endpoint URL + + // TODO localeids, filter out servers that do not support locale ids + + // Filter servers that do not have a matching application uri + if let Some(ref server_uris) = request.server_uris { + if !server_uris.is_empty() { + // Filter the servers down + servers.retain(|server| { + server_uris.iter().any(|uri| *uri == server.application_uri) + }); + } + } + + let servers = Some(servers); + + self.process_service_result( + Ok(FindServersResponse { + response_header: ResponseHeader::new_good(&request.request_header), + servers, + }), + request.request_header.request_handle, + id, + ) + } 
+ + message => { + let now = Instant::now(); + let mgr = trace_read_lock!(self.session_manager); + let session = mgr.find_by_token(&message.request_header().authentication_token); + + let (session_id, session, user_token) = + match Self::validate_request(&message, session, &self.channel) { + Ok(s) => s, + Err(e) => { + match self + .transport + .enqueue_message_for_send(&mut self.channel, e, id) + { + Ok(_) => return RequestProcessResult::Ok, + Err(e) => { + error!("Failed to send request response: {e}"); + return RequestProcessResult::Close; + } + } + } + }; + let deadline = { + let timeout = message.request_header().timeout_hint; + let max_timeout = self.info.config.max_timeout_ms; + let timeout = if max_timeout == 0 { + timeout + } else { + max_timeout.max(timeout) + }; + if timeout == 0 { + // Just set some huge value. A request taking a day can probably + // be safely canceled... + now + Duration::from_secs(60 * 60 * 24) + } else { + now + Duration::from_millis(timeout.into()) + } + }; + let request_handle = message.request_handle(); + + match self + .message_handler + .handle_message(message, session_id, session, user_token, id) + { + super::message_handler::HandleMessageResult::AsyncMessage(mut handle) => { + self.pending_messages + .push(Box::pin(async move { + // Select biased because if for some reason there's a long time between polls, + // we want to return the response even if the timeout expired. We only want to send a timeout + // if the call has not been finished yet. + tokio::select! 
{ + biased; + r = &mut handle => { + r.map_err(|e| e.to_string()) + } + _ = tokio::time::sleep_until(deadline.into()) => { + handle.abort(); + Ok(Response { message: ServiceFault::new(request_handle, StatusCode::BadTimeout).into(), request_id: id }) + } + } + })); + RequestProcessResult::Ok + } + super::message_handler::HandleMessageResult::SyncMessage(s) => { + if let Err(e) = self.transport.enqueue_message_for_send( + &mut self.channel, + s.message, + s.request_id, + ) { + error!("Failed to send response: {e}"); + return RequestProcessResult::Close; + } + RequestProcessResult::Ok + } + super::message_handler::HandleMessageResult::PublishResponse(resp) => { + self.pending_messages.push(Box::pin(resp.recv())); + RequestProcessResult::Ok + } + } + } + } + } + + fn process_service_result( + &mut self, + res: Result, StatusCode>, + request_handle: u32, + request_id: u32, + ) -> RequestProcessResult { + let message = match res { + Ok(m) => m.into(), + Err(e) => ServiceFault::new(request_handle, e).into(), + }; + if let Err(e) = + self.transport + .enqueue_message_for_send(&mut self.channel, message, request_id) + { + error!("Failed to send request response: {e}"); + RequestProcessResult::Close + } else { + RequestProcessResult::Ok + } + } + + fn validate_request( + message: &SupportedMessage, + session: Option>>, + channel: &SecureChannel, + ) -> Result<(u32, Arc>, UserToken), SupportedMessage> { + let header = message.request_header(); + + let Some(session) = session else { + return Err(ServiceFault::new(header, StatusCode::BadSessionIdInvalid).into()); + }; + + let session_lock = trace_read_lock!(session); + let id = session_lock.session_id_numeric(); + + let user_token = (move || { + let token = session_lock.validate_activated()?; + session_lock.validate_secure_channel_id(channel.secure_channel_id())?; + session_lock.validate_timed_out()?; + Ok(token.clone()) + })() + .map_err(|e| ServiceFault::new(header, e).into())?; + Ok((id, session, user_token)) + } + + fn 
open_secure_channel( + &mut self, + security_header: &SecurityHeader, + client_protocol_version: u32, + request: &OpenSecureChannelRequest, + ) -> Result { + let security_header = match security_header { + SecurityHeader::Asymmetric(security_header) => security_header, + _ => { + error!("Secure channel request message does not have asymmetric security header"); + return Err(StatusCode::BadUnexpectedError); + } + }; + + // Must compare protocol version to the one from HELLO + if request.client_protocol_version != client_protocol_version { + error!( + "Client sent a different protocol version than it did in the HELLO - {} vs {}", + request.client_protocol_version, client_protocol_version + ); + return Ok(ServiceFault::new( + &request.request_header, + StatusCode::BadProtocolVersionUnsupported, + ) + .into()); + } + + // Test the request type + let secure_channel_id = match request.request_type { + SecurityTokenRequestType::Issue => { + trace!("Request type == Issue"); + // check to see if renew has been called before or not + if self.secure_channel_state.renew_count > 0 { + error!("Asked to issue token on session that has called renew before"); + } + self.secure_channel_state.create_secure_channel_id() + } + SecurityTokenRequestType::Renew => { + trace!("Request type == Renew"); + + // Check for a duplicate nonce. It is invalid for the renew to use the same nonce + // as was used for last issue/renew. It doesn't matter when policy is none. 
+ if self.channel.security_policy() != SecurityPolicy::None + && request.client_nonce.as_ref() == self.channel.remote_nonce() + { + error!("Client reused a nonce for a renew"); + return Ok(ServiceFault::new( + &request.request_header, + StatusCode::BadNonceInvalid, + ) + .into()); + } + + // check to see if the secure channel has been issued before or not + if !self.secure_channel_state.issued { + error!("Asked to renew token on session that has never issued token"); + return Err(StatusCode::BadUnexpectedError); + } + self.secure_channel_state.renew_count += 1; + self.channel.secure_channel_id() + } + }; + + // Check the requested security mode + debug!("Message security mode == {:?}", request.security_mode); + match request.security_mode { + MessageSecurityMode::None + | MessageSecurityMode::Sign + | MessageSecurityMode::SignAndEncrypt => { + // TODO validate NONCE + } + _ => { + error!("Security mode is invalid"); + return Ok(ServiceFault::new( + &request.request_header, + StatusCode::BadSecurityModeRejected, + ) + .into()); + } + } + + // Process the request + self.secure_channel_state.issued = true; + + // Create a new secure channel info + let security_mode = request.security_mode; + self.channel.set_security_mode(security_mode); + self.channel + .set_token_id(self.secure_channel_state.create_token_id()); + self.channel.set_secure_channel_id(secure_channel_id); + self.channel + .set_remote_cert_from_byte_string(&security_header.sender_certificate)?; + + let revised_lifetime = self + .info + .config + .max_secure_channel_token_lifetime_ms + .min(request.requested_lifetime); + self.channel.set_token_lifetime(revised_lifetime); + + match self + .channel + .set_remote_nonce_from_byte_string(&request.client_nonce) + { + Ok(_) => self.channel.create_random_nonce(), + Err(err) => { + error!("Was unable to set their nonce, check logic"); + return Ok(ServiceFault::new(&request.request_header, err).into()); + } + } + + let security_policy = 
self.channel.security_policy(); + if security_policy != SecurityPolicy::None + && (security_mode == MessageSecurityMode::Sign + || security_mode == MessageSecurityMode::SignAndEncrypt) + { + self.channel.derive_keys(); + } + + let response = OpenSecureChannelResponse { + response_header: ResponseHeader::new_good(&request.request_header), + server_protocol_version: 0, + security_token: ChannelSecurityToken { + channel_id: self.channel.secure_channel_id(), + token_id: self.channel.token_id(), + created_at: DateTime::now(), + revised_lifetime, + }, + server_nonce: self.channel.local_nonce_as_byte_string(), + }; + Ok(response.into()) + } +} + +struct SecureChannelState { + // Issued flag + issued: bool, + // Renew count, debugging + renew_count: usize, + // Last secure channel id + secure_channel_id: Arc, + /// Last token id number + last_token_id: u32, +} + +impl SecureChannelState { + pub fn new(handle: Arc) -> SecureChannelState { + SecureChannelState { + secure_channel_id: handle, + issued: false, + renew_count: 0, + last_token_id: 0, + } + } + + pub fn create_secure_channel_id(&mut self) -> u32 { + self.secure_channel_id.next() + } + + pub fn create_token_id(&mut self) -> u32 { + self.last_token_id += 1; + self.last_token_id + } +} diff --git a/lib/src/server/session/instance.rs b/lib/src/server/session/instance.rs new file mode 100644 index 000000000..f00732bcd --- /dev/null +++ b/lib/src/server/session/instance.rs @@ -0,0 +1,331 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use arc_swap::ArcSwap; + +use super::continuation_points::ContinuationPoint; +use super::manager::next_session_id; +use crate::crypto::X509; +use crate::server::authenticator::UserToken; +use crate::server::identity_token::IdentityToken; +use crate::server::info::ServerInfo; +use crate::server::node_manager::{BrowseContinuationPoint, QueryContinuationPoint}; +use crate::types::{ + ApplicationDescription, ByteString, MessageSecurityMode, 
NodeId, StatusCode, UAString, +}; + +/// An instance of an OPC-UA session. +pub struct Session { + /// The session identifier + session_id: NodeId, + /// For convenience, the integer form of the session ID. + session_id_numeric: u32, + /// Security policy + security_policy_uri: String, + /// Secure channel id + secure_channel_id: u32, + /// Client's certificate + client_certificate: Option, + /// Authentication token for the session + pub(super) authentication_token: NodeId, + /// Session nonce + session_nonce: ByteString, + /// Session name (supplied by client) + session_name: UAString, + /// Session timeout + session_timeout: Duration, + /// User identity token + user_identity: IdentityToken, + /// Session's preferred locale ids + locale_ids: Option>, + /// Negotiated max request message size + max_request_message_size: u32, + /// Negotiated max response message size + max_response_message_size: u32, + /// Endpoint url for this session + endpoint_url: UAString, + /// Maximum number of continuation points for browse + max_browse_continuation_points: usize, + /// Maximum number of continuation points for history. + max_history_continuation_points: usize, + /// Maximum number of continuation points for query. + max_query_continuation_points: usize, + /// Client application description + application_description: ApplicationDescription, + /// Message security mode. Set on the channel, but cached here. + message_security_mode: MessageSecurityMode, + /// Time of last service request. + last_service_request: ArcSwap, + /// Continuation points for browse. + browse_continuation_points: HashMap, + /// Continuation points for history. + history_continuation_points: HashMap, + /// Continuation points for querying. + query_continuation_points: HashMap, + /// User token. + user_token: Option, + /// Whether the session has been closed. + is_closed: bool, +} + +impl Session { + /// Create a new session object. 
+ pub(crate) fn create( + info: &ServerInfo, + authentication_token: NodeId, + secure_channel_id: u32, + session_timeout: u64, + max_request_message_size: u32, + max_response_message_size: u32, + endpoint_url: UAString, + security_policy_uri: String, + user_identity: IdentityToken, + client_certificate: Option, + session_nonce: ByteString, + session_name: UAString, + application_description: ApplicationDescription, + message_security_mode: MessageSecurityMode, + ) -> Self { + let (session_id, session_id_numeric) = next_session_id(); + Self { + session_id, + session_id_numeric, + security_policy_uri, + secure_channel_id, + client_certificate, + authentication_token, + session_nonce, + session_name, + session_timeout: if session_timeout <= 0 { + Duration::from_millis(info.config.max_session_timeout_ms) + } else { + Duration::from_millis(session_timeout) + }, + last_service_request: ArcSwap::new(Arc::new(Instant::now())), + user_identity, + locale_ids: None, + max_request_message_size, + max_response_message_size, + endpoint_url, + max_browse_continuation_points: info.config.limits.max_browse_continuation_points, + max_history_continuation_points: info.config.limits.max_history_continuation_points, + max_query_continuation_points: info.config.limits.max_query_continuation_points, + browse_continuation_points: Default::default(), + history_continuation_points: Default::default(), + query_continuation_points: Default::default(), + user_token: None, + application_description, + message_security_mode, + is_closed: false, + } + } + + /// Check whether this session has timed out and return the appropriate error if it has. + pub(crate) fn validate_timed_out(&self) -> Result<(), StatusCode> { + let elapsed = Instant::now() - **self.last_service_request.load(); + + self.last_service_request.store(Arc::new(Instant::now())); + + if self.session_timeout < elapsed { + // This will eventually be collected by the timeout monitor. 
+ error!("Session has timed out because too much time has elapsed between service calls - elapsed time = {}ms", elapsed.as_millis()); + Err(StatusCode::BadSessionIdInvalid) + } else { + Ok(()) + } + } + + /// Get the session timeout deadline. + pub fn deadline(&self) -> Instant { + **self.last_service_request.load() + self.session_timeout + } + + /// Check whether this session is validated and return the appropriate error if not. + pub(crate) fn validate_activated(&self) -> Result<&UserToken, StatusCode> { + // Unlikely, but this protects against race conditions where the + // session is removed from the session cache after it has been retrieved for a service call, + // but before it has been locked. + if self.is_closed { + return Err(StatusCode::BadSessionClosed); + } + if let Some(token) = &self.user_token { + Ok(token) + } else { + Err(StatusCode::BadSessionNotActivated) + } + } + + /// Check whether this session is associated with the secure channel given by + /// `secure_channel_id` and return the appropriate error fi not. + pub(crate) fn validate_secure_channel_id( + &self, + secure_channel_id: u32, + ) -> Result<(), StatusCode> { + if secure_channel_id != self.secure_channel_id { + Err(StatusCode::BadSecureChannelIdInvalid) + } else { + Ok(()) + } + } + + /// Activate the session. + pub(crate) fn activate( + &mut self, + secure_channel_id: u32, + server_nonce: ByteString, + identity: IdentityToken, + locale_ids: Option>, + user_token: UserToken, + ) { + self.user_token = Some(user_token); + self.secure_channel_id = secure_channel_id; + self.session_nonce = server_nonce; + self.user_identity = identity; + self.locale_ids = locale_ids; + } + + pub(crate) fn close(&mut self) { + self.is_closed = true; + } + + /// Get the session ID of this session, this is known to the client, and is what they + /// use to refer to this session. 
+ /// + /// Note: Do not use this for access control, instead you should almost always use the + /// `UserToken` to refer to the _user_, rather than the session. + pub fn session_id(&self) -> &NodeId { + &self.session_id + } + + /// Get the endpoint this session was created on. + pub fn endpoint_url(&self) -> &UAString { + &self.endpoint_url + } + + /// Get the client certificate, if it is set. + pub fn client_certificate(&self) -> Option<&X509> { + self.client_certificate.as_ref() + } + + /// Get the session nonce. + pub fn session_nonce(&self) -> &ByteString { + &self.session_nonce + } + + /// Whether this session is activated. + pub fn is_activated(&self) -> bool { + self.user_token.is_some() && !self.is_closed + } + + /// Get the secure channel ID of this session. + pub fn secure_channel_id(&self) -> u32 { + self.secure_channel_id + } + + pub(crate) fn add_browse_continuation_point( + &mut self, + cp: BrowseContinuationPoint, + ) -> Result<(), ()> { + if self.max_browse_continuation_points <= self.browse_continuation_points.len() + && self.max_browse_continuation_points > 0 + { + Err(()) + } else { + self.browse_continuation_points.insert(cp.id.clone(), cp); + Ok(()) + } + } + + pub(crate) fn remove_browse_continuation_point( + &mut self, + id: &ByteString, + ) -> Option { + self.browse_continuation_points.remove(id) + } + + pub(crate) fn add_history_continuation_point( + &mut self, + id: &ByteString, + cp: ContinuationPoint, + ) -> Result<(), ()> { + if self.max_history_continuation_points <= self.history_continuation_points.len() + && self.max_history_continuation_points > 0 + { + Err(()) + } else { + self.history_continuation_points.insert(id.clone(), cp); + Ok(()) + } + } + + pub(crate) fn remove_history_continuation_point( + &mut self, + id: &ByteString, + ) -> Option { + self.history_continuation_points.remove(id) + } + + pub(crate) fn add_query_continuation_point( + &mut self, + id: &ByteString, + cp: QueryContinuationPoint, + ) -> Result<(), ()> { + if 
self.max_query_continuation_points <= self.query_continuation_points.len() + && self.max_query_continuation_points > 0 + { + Err(()) + } else { + self.query_continuation_points.insert(id.clone(), cp); + Ok(()) + } + } + + pub(crate) fn remove_query_continuation_point( + &mut self, + id: &ByteString, + ) -> Option { + self.query_continuation_points.remove(id) + } + + /// Get the application description of the client that created this session. + pub fn application_description(&self) -> &ApplicationDescription { + &self.application_description + } + + /// Get the user token, if set. This will be present if the session + /// is activated. + pub fn user_token(&self) -> Option<&UserToken> { + self.user_token.as_ref() + } + + /// Get the message security mode used by this session. + pub fn message_security_mode(&self) -> MessageSecurityMode { + self.message_security_mode + } + + /// Get a numeric representation of the session ID. + pub fn session_id_numeric(&self) -> u32 { + self.session_id_numeric + } + + /// Get the negotiated max request message size. + pub fn max_request_message_size(&self) -> u32 { + self.max_request_message_size + } + + /// Get the negotiated max response message size. + pub fn max_response_message_size(&self) -> u32 { + self.max_response_message_size + } + + /// Get the name of this session as set by the client. + pub fn session_name(&self) -> &str { + self.session_name.as_ref() + } + + /// Get the security policy URI of this session. 
+ pub fn security_policy_uri(&self) -> &str { + &self.security_policy_uri + } +} diff --git a/lib/src/server/session/manager.rs b/lib/src/server/session/manager.rs new file mode 100644 index 000000000..c2b3a0b5c --- /dev/null +++ b/lib/src/server/session/manager.rs @@ -0,0 +1,357 @@ +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +use crypto::{random, security_policy::SecurityPolicy}; +use parking_lot::RwLock; +use tokio::sync::Notify; + +use crate::{ + core::comms::secure_channel::SecureChannel, + crypto, + server::{identity_token::IdentityToken, info::ServerInfo}, + types::{ + ActivateSessionRequest, ActivateSessionResponse, CloseSessionRequest, CloseSessionResponse, + CreateSessionRequest, CreateSessionResponse, NodeId, ResponseHeader, SignatureData, + StatusCode, + }, +}; + +use super::{instance::Session, message_handler::MessageHandler}; + +lazy_static! { + static ref NEXT_SESSION_ID: AtomicU32 = AtomicU32::new(1); +} + +pub(super) fn next_session_id() -> (NodeId, u32) { + // Session id will be a string identifier + let session_id = NEXT_SESSION_ID.fetch_add(1, Ordering::Relaxed); + (NodeId::new(1, session_id), session_id) +} + +/// Manages all sessions on the server. +pub struct SessionManager { + sessions: HashMap>>, + info: Arc, + notify: Arc, +} + +impl SessionManager { + pub(crate) fn new(info: Arc, notify: Arc) -> Self { + Self { + sessions: Default::default(), + info, + notify, + } + } + + /// Get a session by its authentication token. 
+ pub fn find_by_token(&self, authentication_token: &NodeId) -> Option>> { + Self::find_by_token_int(&self.sessions, authentication_token) + } + + fn find_by_token_int<'a>( + sessions: &'a HashMap>>, + authentication_token: &NodeId, + ) -> Option>> { + sessions + .iter() + .find(|(_, s)| &s.read().authentication_token == authentication_token) + .map(|p| p.1.clone()) + } + + pub(crate) fn create_session( + &mut self, + channel: &mut SecureChannel, + certificate_store: &RwLock, + request: &CreateSessionRequest, + ) -> Result { + if self.sessions.len() >= self.info.config.limits.max_sessions { + return Err(StatusCode::BadTooManySessions); + } + + // TODO: Auditing and diagnostics. + let endpoints = self + .info + .new_endpoint_descriptions(request.endpoint_url.as_ref()); + // TODO request.endpoint_url should match hostname of server application certificate + // Find matching end points for this url + if request.endpoint_url.is_null() { + error!("Create session was passed an null endpoint url"); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + + let Some(endpoints) = endpoints else { + return Err(StatusCode::BadTcpEndpointUrlInvalid); + }; + + let client_certificate = crypto::X509::from_byte_string(&request.client_certificate); + let security_policy = channel.security_policy(); + + if security_policy != SecurityPolicy::None { + let store = trace_read_lock!(certificate_store); + let result = match &client_certificate { + Ok(cert) => store.validate_or_reject_application_instance_cert( + cert, + security_policy, + None, + None, + ), + Err(e) => *e, + }; + + if result.is_bad() { + return Err(result); + } + } + + let session_timeout = self + .info + .config + .max_session_timeout_ms + .min(request.requested_session_timeout.floor() as u64); + let max_request_message_size = self.info.config.limits.max_message_size as u32; + + let server_signature = if let Some(ref pkey) = self.info.server_pkey { + crypto::create_signature_data( + pkey, + security_policy, + 
&request.client_certificate, + &request.client_nonce, + ) + .unwrap_or_else(|err| { + error!( + "Cannot create signature data from private key, check log and error {:?}", + err + ); + SignatureData::null() + }) + } else { + SignatureData::null() + }; + + let authentication_token = NodeId::new(0, random::byte_string(32)); + let server_nonce = security_policy.random_nonce(); + let server_certificate = self.info.server_certificate_as_byte_string(); + let server_endpoints = Some(endpoints); + + let session = Session::create( + &self.info, + authentication_token.clone(), + channel.secure_channel_id(), + session_timeout, + max_request_message_size, + request.max_response_message_size, + request.endpoint_url.clone(), + security_policy.to_uri().to_string(), + IdentityToken::None, + client_certificate.ok(), + server_nonce.clone(), + request.session_name.clone(), + request.client_description.clone(), + channel.security_mode(), + ); + + let session_id = session.session_id().clone(); + self.sessions + .insert(session_id.clone(), Arc::new(RwLock::new(session))); + + self.notify.notify_waiters(); + + // TODO: Register session in core namespace + // Note: This will instead be handled by the diagnostic node manager on the fly. 
+ + Ok(CreateSessionResponse { + response_header: ResponseHeader::new_good(&request.request_header), + session_id: session_id, + authentication_token, + revised_session_timeout: session_timeout as f64, + server_nonce, + server_certificate, + server_endpoints, + server_software_certificates: None, + server_signature, + max_request_message_size, + }) + } + + pub(crate) async fn activate_session( + &mut self, + channel: &mut SecureChannel, + request: &ActivateSessionRequest, + ) -> Result { + let Some(session) = self.find_by_token(&request.request_header.authentication_token) else { + return Err(StatusCode::BadSessionIdInvalid); + }; + + let mut session = trace_write_lock!(session); + session.validate_timed_out()?; + + let security_policy = channel.security_policy(); + let security_mode = channel.security_mode(); + let secure_channel_id = channel.secure_channel_id(); + let server_nonce = security_policy.random_nonce(); + let endpoint_url = session.endpoint_url().as_ref(); + + if !self + .info + .endpoint_exists(endpoint_url, security_policy, security_mode) + { + error!("activate_session, Endpoint dues not exist for requested url & mode {}, {:?} / {:?}", + endpoint_url, security_policy, security_mode); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + + if security_policy != SecurityPolicy::None { + Self::verify_client_signature( + security_policy, + &self.info, + &session, + &request.client_signature, + )?; + } + + let user_token = self + .info + .authenticate_endpoint( + request, + endpoint_url, + security_policy, + security_mode, + &request.user_identity_token, + session.session_nonce(), + ) + .await?; + + if !session.is_activated() && session.secure_channel_id() != secure_channel_id { + error!("activate session, rejected secure channel id {} for inactive session does not match one used to create session, {}", secure_channel_id, session.secure_channel_id()); + return Err(StatusCode::BadSecureChannelIdInvalid); + } else { + // TODO additional secure channel 
validation here for client certificate and user identity + // token + } + + // TODO: If the user identity changed here, we need to re-check permissions for any created monitored items. + // It may be possible to just create a "fake" UserAccessLevel for each monitored item and pass it to the auth manager. + // The standard also mentions that a server may need to + // "Tear down connections to an underlying system and re-establish them using the new credentials". We need some way to + // handle this eventuality, perhaps a dedicated node-manager endpoint that can be called here. + session.activate( + secure_channel_id, + server_nonce, + IdentityToken::new(&request.user_identity_token, &self.info.decoding_options()), + request.locale_ids.clone(), + user_token, + ); + + // TODO: Audit + + Ok(ActivateSessionResponse { + response_header: ResponseHeader::new_good(&request.request_header), + server_nonce: session.session_nonce().clone(), + results: None, + diagnostic_infos: None, + }) + } + + fn verify_client_signature( + security_policy: SecurityPolicy, + info: &ServerInfo, + session: &Session, + client_signature: &SignatureData, + ) -> Result<(), StatusCode> { + if let Some(ref client_certificate) = session.client_certificate() { + if let Some(ref server_certificate) = info.server_certificate { + let r = crypto::verify_signature_data( + client_signature, + security_policy, + client_certificate, + server_certificate, + session.session_nonce().as_ref(), + ); + if r.is_good() { + Ok(()) + } else { + Err(r) + } + } else { + error!("Client signature verification failed, server has no server certificate"); + Err(StatusCode::BadUnexpectedError) + } + } else { + error!("Client signature verification failed, session has no client certificate"); + Err(StatusCode::BadUnexpectedError) + } + } + + pub(crate) async fn close_session( + &mut self, + channel: &mut SecureChannel, + handler: &mut MessageHandler, + request: &CloseSessionRequest, + ) -> Result { + let Some(session) = 
self.find_by_token(&request.request_header.authentication_token) else { + return Err(StatusCode::BadSessionIdInvalid); + }; + + let session = trace_read_lock!(session); + let id = session.session_id_numeric(); + let token = session.user_token().cloned(); + + let secure_channel_id = channel.secure_channel_id(); + if !session.is_activated() && session.secure_channel_id() != secure_channel_id { + error!("close_session rejected, secure channel id {} for inactive session does not match one used to create session, {}", secure_channel_id, session.secure_channel_id()); + return Err(StatusCode::BadSecureChannelIdInvalid); + } + let session_id = session.session_id().clone(); + + let session = self.sessions.remove(&session_id).unwrap(); + { + let mut session_lck = trace_write_lock!(session); + session_lck.close(); + } + + if request.delete_subscriptions { + handler + .delete_session_subscriptions(id, session, token.unwrap()) + .await; + } + + Ok(CloseSessionResponse { + response_header: ResponseHeader::new_good(&request.request_header), + }) + } + + pub(crate) fn expire_session(&mut self, id: &NodeId) { + let Some(session) = self.sessions.remove(id) else { + return; + }; + + info!("Session {id} has expired, removing it from the session map. 
Subscriptions will remain until they individually expire"); + + let mut session = trace_write_lock!(session); + session.close(); + } + + pub(crate) fn check_session_expiry(&self) -> (Instant, Vec) { + let now = Instant::now(); + let mut expired = Vec::new(); + let mut expiry = + now + Duration::from_millis(self.info.config.max_session_timeout_ms as u64); + for (id, session) in &self.sessions { + let deadline = session.read().deadline(); + if deadline < now { + expired.push(id.clone()); + } else if deadline < expiry { + expiry = deadline; + } + } + + (expiry, expired) + } +} diff --git a/lib/src/server/session/message_handler.rs b/lib/src/server/session/message_handler.rs new file mode 100644 index 000000000..3daf8e3c5 --- /dev/null +++ b/lib/src/server/session/message_handler.rs @@ -0,0 +1,449 @@ +use std::{sync::Arc, time::Instant}; + +use chrono::Utc; +use parking_lot::RwLock; +use tokio::task::JoinHandle; + +use crate::{ + core::SupportedMessage, + server::{ + authenticator::UserToken, + info::ServerInfo, + node_manager::{NodeManagers, RequestContext}, + session::services, + subscriptions::{PendingPublish, SubscriptionCache}, + }, + types::{ + PublishRequest, ResponseHeader, ServiceFault, SetTriggeringRequest, SetTriggeringResponse, + StatusCode, + }, +}; + +use super::{controller::Response, instance::Session}; + +/// Type that takes care of incoming requests that have passed +/// the initial validation stage, meaning that they have a session and a valid +/// secure channel. +pub(crate) struct MessageHandler { + node_managers: NodeManagers, + info: Arc, + subscriptions: Arc, +} + +/// Result of a message. All messages should be able to yield a response, but +/// depending on the message this may take different forms. +pub(crate) enum HandleMessageResult { + /// A request spawned as a tokio task, all messages that go to + /// node managers return this response type. 
+ AsyncMessage(JoinHandle), + /// A publish request, which takes a slightly different form, instead + /// using a callback pattern. + PublishResponse(PendingPublishRequest), + /// A message that was resolved synchronously and returns a response immediately. + SyncMessage(Response), +} + +pub(crate) struct PendingPublishRequest { + request_id: u32, + request_handle: u32, + recv: tokio::sync::oneshot::Receiver, +} + +impl PendingPublishRequest { + /// Receive a publish request response. + /// This may take a long time, since publish requests can be open for + /// arbitrarily long waiting for new data to be produced. + pub async fn recv(self) -> Result { + match self.recv.await { + Ok(msg) => Ok(Response { + message: msg, + request_id: self.request_id, + }), + Err(_) => { + // This shouldn't be possible at all. + warn!("Failed to receive response to publish request, sender dropped."); + Ok(Response { + message: ServiceFault::new(self.request_handle, StatusCode::BadInternalError) + .into(), + request_id: self.request_id, + }) + } + } + } +} + +/// Wrapper around information necessary for executing a request. +pub(super) struct Request { + pub request: Box, + pub request_id: u32, + pub request_handle: u32, + pub info: Arc, + pub session: Arc>, + pub token: UserToken, + pub subscriptions: Arc, + pub session_id: u32, +} + +/// Convenient macro for creating a response containing a service fault. +macro_rules! service_fault { + ($req:ident, $status:expr) => { + Response { + message: $crate::types::ServiceFault::new($req.request_handle, $status).into(), + request_id: $req.request_id, + } + }; +} + +impl Request { + /// Create a new request. 
+ pub fn new( + request: Box, + info: Arc, + request_id: u32, + request_handle: u32, + session: Arc>, + token: UserToken, + subscriptions: Arc, + session_id: u32, + ) -> Self { + Self { + request, + request_id, + request_handle, + info, + session, + token, + subscriptions, + session_id, + } + } + + /// Get a request context object from this request. + pub fn context(&self) -> RequestContext { + RequestContext { + session: self.session.clone(), + authenticator: self.info.authenticator.clone(), + token: self.token.clone(), + current_node_manager_index: 0, + type_tree: self.info.type_tree.clone(), + subscriptions: self.subscriptions.clone(), + session_id: self.session_id, + info: self.info.clone(), + } + } +} + +/// Macro for calling a service asynchronously. +macro_rules! async_service_call { + ($m:path, $slf:ident, $req:ident, $r:ident) => { + HandleMessageResult::AsyncMessage(tokio::task::spawn($m( + $slf.node_managers.clone(), + Request::new( + $req, + $slf.info.clone(), + $r.request_id, + $r.request_handle, + $r.session, + $r.token, + $slf.subscriptions.clone(), + $r.session_id, + ), + ))) + }; +} + +struct RequestData { + request_id: u32, + request_handle: u32, + session: Arc>, + token: UserToken, + session_id: u32, +} + +impl MessageHandler { + /// Create a new message handler. + pub fn new( + info: Arc, + node_managers: NodeManagers, + subscriptions: Arc, + ) -> Self { + Self { + node_managers, + info, + subscriptions, + } + } + + /// Handle an incoming message and return a result object. + /// This method returns synchronously, but the returned result object + /// may take longer to resolve. + /// Once this returns the request will either be resolved or will have been started. 
+ pub fn handle_message( + &mut self, + message: SupportedMessage, + session_id: u32, + session: Arc>, + token: UserToken, + request_id: u32, + ) -> HandleMessageResult { + let data = RequestData { + request_id, + request_handle: message.request_handle(), + session, + token, + session_id, + }; + // Session management requests are not handled here. + match message { + SupportedMessage::ReadRequest(request) => { + async_service_call!(services::read, self, request, data) + } + + SupportedMessage::BrowseRequest(request) => { + async_service_call!(services::browse, self, request, data) + } + + SupportedMessage::BrowseNextRequest(request) => { + async_service_call!(services::browse_next, self, request, data) + } + + SupportedMessage::TranslateBrowsePathsToNodeIdsRequest(request) => { + async_service_call!(services::translate_browse_paths, self, request, data) + } + + SupportedMessage::RegisterNodesRequest(request) => { + async_service_call!(services::register_nodes, self, request, data) + } + + SupportedMessage::UnregisterNodesRequest(request) => { + async_service_call!(services::unregister_nodes, self, request, data) + } + + SupportedMessage::CreateMonitoredItemsRequest(request) => { + async_service_call!(services::create_monitored_items, self, request, data) + } + + SupportedMessage::ModifyMonitoredItemsRequest(request) => { + async_service_call!(services::modify_monitored_items, self, request, data) + } + + SupportedMessage::SetMonitoringModeRequest(request) => { + async_service_call!(services::set_monitoring_mode, self, request, data) + } + + SupportedMessage::DeleteMonitoredItemsRequest(request) => { + async_service_call!(services::delete_monitored_items, self, request, data) + } + + SupportedMessage::SetTriggeringRequest(request) => self.set_triggering(request, data), + + SupportedMessage::PublishRequest(request) => self.publish(request, data), + + SupportedMessage::RepublishRequest(request) => { + HandleMessageResult::SyncMessage(Response::from_result( + 
self.subscriptions.republish(data.session_id, &request), + data.request_handle, + data.request_id, + )) + } + + SupportedMessage::CreateSubscriptionRequest(request) => { + HandleMessageResult::SyncMessage(Response::from_result( + self.subscriptions.create_subscription( + data.session_id, + &data.session, + &request, + &self.info, + ), + data.request_handle, + data.request_id, + )) + } + + SupportedMessage::ModifySubscriptionRequest(request) => { + HandleMessageResult::SyncMessage(Response::from_result( + self.subscriptions + .modify_subscription(data.session_id, &request, &self.info), + data.request_handle, + data.request_id, + )) + } + + SupportedMessage::SetPublishingModeRequest(request) => { + HandleMessageResult::SyncMessage(Response::from_result( + self.subscriptions + .set_publishing_mode(data.session_id, &request), + data.request_handle, + data.request_id, + )) + } + + SupportedMessage::TransferSubscriptionsRequest(request) => { + HandleMessageResult::SyncMessage(Response { + message: self + .subscriptions + .transfer(&request, data.session_id, &data.session) + .into(), + request_id: data.request_id, + }) + } + + SupportedMessage::DeleteSubscriptionsRequest(request) => { + async_service_call!(services::delete_subscriptions, self, request, data) + } + + SupportedMessage::HistoryReadRequest(request) => { + async_service_call!(services::history_read, self, request, data) + } + + SupportedMessage::HistoryUpdateRequest(request) => { + async_service_call!(services::history_update, self, request, data) + } + + SupportedMessage::WriteRequest(request) => { + async_service_call!(services::write, self, request, data) + } + + SupportedMessage::QueryFirstRequest(request) => { + async_service_call!(services::query_first, self, request, data) + } + + SupportedMessage::QueryNextRequest(request) => { + async_service_call!(services::query_next, self, request, data) + } + + SupportedMessage::CallRequest(request) => { + async_service_call!(services::call, self, request, data) + 
} + + SupportedMessage::AddNodesRequest(request) => { + async_service_call!(services::add_nodes, self, request, data) + } + + SupportedMessage::AddReferencesRequest(request) => { + async_service_call!(services::add_references, self, request, data) + } + + SupportedMessage::DeleteNodesRequest(request) => { + async_service_call!(services::delete_nodes, self, request, data) + } + + SupportedMessage::DeleteReferencesRequest(request) => { + async_service_call!(services::delete_references, self, request, data) + } + + message => { + debug!( + "Message handler does not handle this kind of message {:?}", + message + ); + HandleMessageResult::SyncMessage(Response { + message: ServiceFault::new( + message.request_header(), + StatusCode::BadServiceUnsupported, + ) + .into(), + request_id, + }) + } + } + } + + /// Delete the subscriptions from a session. + pub async fn delete_session_subscriptions( + &mut self, + session_id: u32, + session: Arc>, + token: UserToken, + ) { + let ids = self.subscriptions.get_session_subscription_ids(session_id); + if ids.is_empty() { + return; + } + + let mut context = RequestContext { + session, + session_id, + authenticator: self.info.authenticator.clone(), + token, + current_node_manager_index: 0, + type_tree: self.info.type_tree.clone(), + subscriptions: self.subscriptions.clone(), + info: self.info.clone(), + }; + + // Ignore the result + if let Err(e) = services::delete_subscriptions_inner( + self.node_managers.clone(), + ids, + &self.subscriptions, + &mut context, + ) + .await + { + warn!("Cleaning up session subscriptions failed: {e}"); + } + } + + fn set_triggering( + &self, + request: Box, + data: RequestData, + ) -> HandleMessageResult { + let result = self + .subscriptions + .set_triggering( + data.session_id, + request.subscription_id, + request.triggering_item_id, + request.links_to_add.unwrap_or_default(), + request.links_to_remove.unwrap_or_default(), + ) + .map(|(add_res, remove_res)| SetTriggeringResponse { + response_header: 
ResponseHeader::new_good(&request.request_header), + add_results: Some(add_res), + add_diagnostic_infos: None, + remove_results: Some(remove_res), + remove_diagnostic_infos: None, + }); + + HandleMessageResult::SyncMessage(Response::from_result( + result, + data.request_handle, + data.request_id, + )) + } + + fn publish(&self, request: Box, data: RequestData) -> HandleMessageResult { + let now = Utc::now(); + let now_instant = Instant::now(); + let (send, recv) = tokio::sync::oneshot::channel(); + let timeout = request.request_header.timeout_hint; + let timeout = if timeout == 0 { + self.info.config.publish_timeout_default_ms + } else { + timeout.into() + }; + + let req = PendingPublish { + response: send, + request, + ack_results: None, + deadline: now_instant + std::time::Duration::from_millis(timeout), + }; + match self + .subscriptions + .enqueue_publish_request(data.session_id, &now, now_instant, req) + { + Ok(_) => HandleMessageResult::PublishResponse(PendingPublishRequest { + request_id: data.request_id, + request_handle: data.request_handle, + recv, + }), + Err(e) => HandleMessageResult::SyncMessage(Response { + message: ServiceFault::new(data.request_handle, e).into(), + request_id: data.request_id, + }), + } + } +} diff --git a/lib/src/server/session/mod.rs b/lib/src/server/session/mod.rs new file mode 100644 index 000000000..e83f4bc7f --- /dev/null +++ b/lib/src/server/session/mod.rs @@ -0,0 +1,7 @@ +pub mod continuation_points; +pub mod controller; +pub mod instance; +pub mod manager; +#[macro_use] +pub mod message_handler; +mod services; diff --git a/lib/src/server/session/services/attribute.rs b/lib/src/server/session/services/attribute.rs new file mode 100644 index 000000000..76a769883 --- /dev/null +++ b/lib/src/server/session/services/attribute.rs @@ -0,0 +1,381 @@ +use crate::{ + server::{ + node_manager::{ + HistoryNode, HistoryReadDetails, HistoryUpdateDetails, HistoryUpdateNode, NodeManagers, + ReadNode, WriteNode, + }, + 
session::{controller::Response, message_handler::Request}, + }, + types::{ + ByteString, DeleteAtTimeDetails, ExtensionObject, HistoryReadRequest, HistoryReadResponse, + HistoryReadResult, HistoryUpdateRequest, HistoryUpdateResponse, NodeId, ObjectId, + ReadRequest, ReadResponse, ResponseHeader, StatusCode, TimestampsToReturn, WriteRequest, + WriteResponse, + }, +}; +pub async fn read(node_managers: NodeManagers, request: Request) -> Response { + let mut context = request.context(); + let nodes_to_read = take_service_items!( + request, + request.request.nodes_to_read, + request.info.operational_limits.max_nodes_per_read + ); + if request.request.max_age < 0.0 { + return service_fault!(request, StatusCode::BadMaxAgeInvalid); + } + if request.request.timestamps_to_return == TimestampsToReturn::Invalid { + return service_fault!(request, StatusCode::BadTimestampsToReturnInvalid); + } + + let mut results: Vec<_> = nodes_to_read + .into_iter() + .map(|n| ReadNode::new(n)) + .collect(); + + for (idx, node_manager) in node_managers.into_iter().enumerate() { + context.current_node_manager_index = idx; + let mut batch: Vec<_> = results + .iter_mut() + .filter(|n| { + node_manager.owns_node(&n.node().node_id) + && n.status() == StatusCode::BadNodeIdUnknown + }) + .collect(); + + if batch.is_empty() { + continue; + } + + if let Err(e) = node_manager + .read( + &context, + request.request.max_age, + request.request.timestamps_to_return, + &mut batch, + ) + .await + { + for node in &mut batch { + node.set_error(e); + } + } + } + + let results = results.into_iter().map(|r| r.take_result()).collect(); + + Response { + message: ReadResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn write(node_managers: NodeManagers, request: Request) -> Response { + let mut context = request.context(); + let nodes_to_write = take_service_items!( + 
request, + request.request.nodes_to_write, + request.info.operational_limits.max_nodes_per_write + ); + + let mut results: Vec<_> = nodes_to_write + .into_iter() + .map(|n| WriteNode::new(n)) + .collect(); + + for (idx, node_manager) in node_managers.into_iter().enumerate() { + context.current_node_manager_index = idx; + let mut batch: Vec<_> = results + .iter_mut() + .filter(|n| { + node_manager.owns_node(&n.value().node_id) + && n.status() == StatusCode::BadNodeIdUnknown + }) + .collect(); + + if batch.is_empty() { + continue; + } + + if let Err(e) = node_manager.write(&context, &mut batch).await { + for node in &mut batch { + node.set_status(e); + } + } + } + + let results = results.into_iter().map(|r| r.status()).collect(); + + Response { + message: WriteResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn history_read( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let Some(items) = request.request.nodes_to_read else { + return service_fault!(request, StatusCode::BadNothingToDo); + }; + if items.is_empty() { + return service_fault!(request, StatusCode::BadNothingToDo); + } + let details = match HistoryReadDetails::from_extension_object( + request.request.history_read_details, + &request.info.decoding_options(), + ) { + Ok(r) => r, + Err(e) => return service_fault!(request, e), + }; + + let is_events = matches!(details, HistoryReadDetails::Events(_)); + + if is_events { + if items.len() + > request + .info + .operational_limits + .max_nodes_per_history_read_events + { + return service_fault!(request, StatusCode::BadTooManyOperations); + } + } else if items.len() + > request + .info + .operational_limits + .max_nodes_per_history_read_data + { + return service_fault!(request, StatusCode::BadTooManyOperations); + } + let mut nodes: Vec<_> = { + let mut 
session = trace_write_lock!(request.session); + items + .into_iter() + .map(|node| { + if node.continuation_point.is_null() { + let mut node = HistoryNode::new(node, is_events, None); + if request.request.release_continuation_points { + node.set_status(StatusCode::Good); + } + node + } else { + let cp = session.remove_history_continuation_point(&node.continuation_point); + let cp_missing = cp.is_none(); + let mut node = HistoryNode::new(node, is_events, cp); + if cp_missing { + node.set_status(StatusCode::BadContinuationPointInvalid); + } else if request.request.release_continuation_points { + node.set_status(StatusCode::Good); + } + node + } + }) + .collect() + }; + + // If we are releasing continuation points we should not return any data. + if request.request.release_continuation_points { + return Response { + message: HistoryReadResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some( + nodes + .into_iter() + .map(|n| HistoryReadResult { + status_code: n.status(), + continuation_point: ByteString::null(), + history_data: ExtensionObject::null(), + }) + .collect(), + ), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + }; + } + + for (idx, manager) in node_managers.into_iter().enumerate() { + context.current_node_manager_index = idx; + let mut batch: Vec<_> = nodes + .iter_mut() + .filter(|n| { + if n.node_id() == &ObjectId::Server.into() + && matches!(details, HistoryReadDetails::Events(_)) + { + manager.owns_server_events() && n.status() == StatusCode::BadNodeIdUnknown + } else { + manager.owns_node(n.node_id()) && n.status() == StatusCode::BadNodeIdUnknown + } + }) + .collect(); + + if batch.is_empty() { + continue; + } + + let result = match &details { + HistoryReadDetails::RawModified(d) => { + manager + .history_read_raw_modified( + &context, + d, + &mut batch, + request.request.timestamps_to_return, + ) + .await + } + HistoryReadDetails::AtTime(d) => { + manager + .history_read_at_time( + 
&context, + d, + &mut batch, + request.request.timestamps_to_return, + ) + .await + } + HistoryReadDetails::Processed(d) => { + manager + .history_read_processed( + &context, + d, + &mut batch, + request.request.timestamps_to_return, + ) + .await + } + HistoryReadDetails::Events(d) => { + manager + .history_read_events( + &context, + d, + &mut batch, + request.request.timestamps_to_return, + ) + .await + } + HistoryReadDetails::Annotations(d) => { + manager + .history_read_annotations( + &context, + d, + &mut batch, + request.request.timestamps_to_return, + ) + .await + } + }; + + if let Err(e) = result { + for node in batch { + node.set_status(e); + } + } + } + let results: Vec<_> = { + let mut session = trace_write_lock!(request.session); + nodes + .into_iter() + .map(|n| n.into_result(&mut session)) + .collect() + }; + + Response { + message: HistoryReadResponse { + response_header: ResponseHeader::new_good(&request.request.request_header), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn history_update( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let items = take_service_items!( + request, + request.request.history_update_details, + request.info.operational_limits.max_nodes_per_history_update + ); + let decoding_options = request.info.decoding_options(); + + let mut nodes: Vec<_> = items + .into_iter() + .map(|obj| { + let details = match HistoryUpdateDetails::from_extension_object(obj, &decoding_options) + { + Ok(h) => h, + Err(e) => { + // need some empty history update node here, it won't be passed to node managers. 
+ let mut node = HistoryUpdateNode::new(HistoryUpdateDetails::DeleteAtTime( + DeleteAtTimeDetails { + node_id: NodeId::null(), + req_times: None, + }, + )); + node.set_status(e); + return node; + } + }; + HistoryUpdateNode::new(details) + }) + .collect(); + + for (idx, manager) in node_managers.into_iter().enumerate() { + context.current_node_manager_index = idx; + let mut batch: Vec<_> = nodes + .iter_mut() + .filter(|n| { + if n.details().node_id() == &ObjectId::Server.into() + && matches!( + n.details(), + HistoryUpdateDetails::UpdateEvent(_) | HistoryUpdateDetails::DeleteEvent(_) + ) + { + manager.owns_server_events() + } else { + manager.owns_node(n.details().node_id()) + && n.status() == StatusCode::BadNodeIdUnknown + } + }) + .collect(); + + if batch.is_empty() { + continue; + } + + if let Err(e) = manager.history_update(&context, &mut batch).await { + for node in batch { + node.set_status(e); + } + } + } + let results: Vec<_> = nodes.into_iter().map(|n| n.into_result()).collect(); + + Response { + message: HistoryUpdateResponse { + response_header: ResponseHeader::new_good(&request.request.request_header), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} diff --git a/lib/src/server/session/services/method.rs b/lib/src/server/session/services/method.rs new file mode 100644 index 000000000..8cefc4047 --- /dev/null +++ b/lib/src/server/session/services/method.rs @@ -0,0 +1,50 @@ +use crate::{ + server::{ + node_manager::{MethodCall, NodeManagers}, + session::{controller::Response, message_handler::Request}, + }, + types::{CallRequest, CallResponse, ResponseHeader, StatusCode}, +}; + +pub async fn call(node_managers: NodeManagers, request: Request) -> Response { + let mut context = request.context(); + let method_calls = take_service_items!( + request, + request.request.methods_to_call, + request.info.operational_limits.max_nodes_per_method_call + ); + + println!("Receive method call! 
{method_calls:?}"); + + let mut calls: Vec<_> = method_calls.into_iter().map(MethodCall::new).collect(); + + for (idx, node_manager) in node_managers.into_iter().enumerate() { + context.current_node_manager_index = idx; + let mut owned: Vec<_> = calls + .iter_mut() + .filter(|c| { + node_manager.owns_node(c.method_id()) && c.status() == StatusCode::BadMethodInvalid + }) + .collect(); + + if owned.is_empty() { + continue; + } + + if let Err(e) = node_manager.call(&context, &mut owned).await { + for call in owned { + call.set_status(e); + } + } + } + + Response { + message: CallResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(calls.into_iter().map(|c| c.into_result()).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} diff --git a/lib/src/server/session/services/mod.rs b/lib/src/server/session/services/mod.rs new file mode 100644 index 000000000..1ead0eb03 --- /dev/null +++ b/lib/src/server/session/services/mod.rs @@ -0,0 +1,30 @@ +macro_rules! 
take_service_items { + ($request:ident, $items:expr, $limit:expr) => {{ + let Some(it) = $items else { + return service_fault!($request, StatusCode::BadNothingToDo); + }; + if it.is_empty() { + return service_fault!($request, StatusCode::BadNothingToDo); + } + if it.len() > $limit { + return service_fault!($request, StatusCode::BadTooManyOperations); + } + it + }}; +} + +mod attribute; +mod method; +mod monitored_items; +mod node_management; +mod query; +mod subscriptions; +mod view; + +pub(super) use attribute::*; +pub(super) use method::*; +pub(super) use monitored_items::*; +pub(super) use node_management::*; +pub(super) use query::*; +pub(super) use subscriptions::*; +pub(super) use view::*; diff --git a/lib/src/server/session/services/monitored_items.rs b/lib/src/server/session/services/monitored_items.rs new file mode 100644 index 000000000..0110aff63 --- /dev/null +++ b/lib/src/server/session/services/monitored_items.rs @@ -0,0 +1,264 @@ +use crate::{ + server::{ + node_manager::{MonitoredItemRef, NodeManagers}, + session::{controller::Response, message_handler::Request}, + subscriptions::CreateMonitoredItem, + }, + types::{ + CreateMonitoredItemsRequest, CreateMonitoredItemsResponse, DeleteMonitoredItemsRequest, + DeleteMonitoredItemsResponse, ModifyMonitoredItemsRequest, ModifyMonitoredItemsResponse, + ResponseHeader, SetMonitoringModeRequest, SetMonitoringModeResponse, StatusCode, + }, +}; + +pub async fn create_monitored_items( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let items_to_create = take_service_items!( + request, + request.request.items_to_create, + request.info.operational_limits.max_monitored_items_per_call + ); + let Some(len) = request + .subscriptions + .get_monitored_item_count(request.session_id, request.request.subscription_id) + else { + return service_fault!(request, StatusCode::BadSubscriptionIdInvalid); + }; + + let max_per_sub = request + .info + .config + .limits + 
.subscriptions + .max_monitored_items_per_sub; + if max_per_sub > 0 && max_per_sub < len + items_to_create.len() { + return service_fault!(request, StatusCode::BadTooManyMonitoredItems); + } + + let mut items: Vec<_> = { + let type_tree = trace_read_lock!(request.info.type_tree); + items_to_create + .into_iter() + .map(|r| { + CreateMonitoredItem::new( + r, + request.info.monitored_item_id_handle.next(), + request.request.subscription_id, + &request.info, + request.request.timestamps_to_return, + &type_tree, + ) + }) + .collect() + }; + + for (idx, mgr) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let mut owned: Vec<_> = items + .iter_mut() + .filter(|n| { + n.status_code() == StatusCode::BadNodeIdUnknown + && mgr.owns_node(&n.item_to_monitor().node_id) + }) + .collect(); + + if owned.is_empty() { + continue; + } + + if let Err(e) = mgr.create_monitored_items(&context, &mut owned).await { + for n in owned { + n.set_status(e); + } + } + } + + let handles: Vec<_> = items + .iter() + .map(|i| { + MonitoredItemRef::new( + i.handle(), + i.item_to_monitor().node_id.clone(), + i.item_to_monitor().attribute_id, + ) + }) + .collect(); + let handles_ref: Vec<_> = handles.iter().collect(); + + let res = match request.subscriptions.create_monitored_items( + request.session_id, + request.request.subscription_id, + &items, + ) { + Ok(r) => r, + // Shouldn't happen, would be due to a race condition. If it does happen we're fine with failing. + Err(e) => { + // Should clean up any that failed to create though. 
+ for (idx, mgr) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + mgr.delete_monitored_items(&context, &handles_ref).await; + } + return service_fault!(request, e); + } + }; + + Response { + message: CreateMonitoredItemsResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(res), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn modify_monitored_items( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let items_to_modify = take_service_items!( + request, + request.request.items_to_modify, + request.info.operational_limits.max_monitored_items_per_call + ); + + // Call modify first, then only pass successful modify's to the node managers. + let results = { + let type_tree = trace_read_lock!(request.info.type_tree); + + match request.subscriptions.modify_monitored_items( + request.session_id, + request.request.subscription_id, + &request.info, + request.request.timestamps_to_return, + items_to_modify, + &type_tree, + ) { + Ok(r) => r, + Err(e) => return service_fault!(request, e), + } + }; + + for (idx, mgr) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let owned: Vec<_> = results + .iter() + .filter(|n| n.status_code().is_good() && mgr.owns_node(n.node_id())) + .collect(); + + if owned.is_empty() { + continue; + } + + mgr.modify_monitored_items(&context, &owned).await; + } + + Response { + message: ModifyMonitoredItemsResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results.into_iter().map(|r| r.into_result()).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn set_monitoring_mode( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let items = take_service_items!( + request, + 
request.request.monitored_item_ids, + request.info.operational_limits.max_monitored_items_per_call + ); + + let results = match request.subscriptions.set_monitoring_mode( + request.session_id, + request.request.subscription_id, + request.request.monitoring_mode, + items, + ) { + Ok(r) => r, + Err(e) => return service_fault!(request, e), + }; + + for (idx, mgr) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let owned: Vec<_> = results + .iter() + .filter(|n| n.0.is_good() && mgr.owns_node(n.1.node_id())) + .map(|n| &n.1) + .collect(); + + if owned.is_empty() { + continue; + } + + mgr.set_monitoring_mode(&context, request.request.monitoring_mode, &owned) + .await; + } + + Response { + message: SetMonitoringModeResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results.into_iter().map(|r| r.0).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn delete_monitored_items( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let items = take_service_items!( + request, + request.request.monitored_item_ids, + request.info.operational_limits.max_monitored_items_per_call + ); + + let results = match request.subscriptions.delete_monitored_items( + request.session_id, + request.request.subscription_id, + &items, + ) { + Ok(r) => r, + Err(e) => return service_fault!(request, e), + }; + + for (idx, mgr) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let owned: Vec<_> = results + .iter() + .filter(|n| n.0.is_good() && mgr.owns_node(n.1.node_id())) + .map(|n| &n.1) + .collect(); + + if owned.is_empty() { + continue; + } + + mgr.delete_monitored_items(&context, &owned).await; + } + + Response { + message: DeleteMonitoredItemsResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results.into_iter().map(|r| 
r.0).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} diff --git a/lib/src/server/session/services/node_management.rs b/lib/src/server/session/services/node_management.rs new file mode 100644 index 000000000..64a966bae --- /dev/null +++ b/lib/src/server/session/services/node_management.rs @@ -0,0 +1,257 @@ +use crate::{ + server::{ + node_manager::{ + AddNodeItem, AddReferenceItem, DeleteNodeItem, DeleteReferenceItem, NodeManagers, + }, + session::{controller::Response, message_handler::Request}, + }, + types::{ + AddNodesRequest, AddNodesResponse, AddReferencesRequest, AddReferencesResponse, + DeleteNodesRequest, DeleteNodesResponse, DeleteReferencesRequest, DeleteReferencesResponse, + NodeId, ResponseHeader, StatusCode, + }, +}; + +pub async fn add_nodes(node_managers: NodeManagers, request: Request) -> Response { + let mut context = request.context(); + + let nodes_to_add = take_service_items!( + request, + request.request.nodes_to_add, + request + .info + .operational_limits + .max_nodes_per_node_management + ); + + let decoding_options = request.info.decoding_options(); + let mut to_add: Vec<_> = nodes_to_add + .into_iter() + .map(|it| AddNodeItem::new(it, &decoding_options)) + .collect(); + + for (idx, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let mut owned: Vec<_> = to_add + .iter_mut() + .filter(|c| { + if c.status() != StatusCode::BadNotSupported { + return false; + } + if c.requested_new_node_id().is_null() { + node_manager.handle_new_node(&c.parent_node_id()) + } else { + node_manager.owns_node(c.requested_new_node_id()) + } + }) + .collect(); + + if owned.is_empty() { + continue; + } + + if let Err(e) = node_manager.add_nodes(&context, &mut owned).await { + for node in owned { + node.set_result(NodeId::null(), e); + } + } + } + + Response { + message: AddNodesResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: 
Some(to_add.into_iter().map(|c| c.into_result()).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn add_references( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + + let references_to_add = take_service_items!( + request, + request.request.references_to_add, + request + .info + .operational_limits + .max_references_per_references_management + ); + + let mut to_add: Vec<_> = references_to_add + .into_iter() + .map(|it| AddReferenceItem::new(it)) + .collect(); + + for (idx, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let mut owned: Vec<_> = to_add + .iter_mut() + .filter(|v| { + if v.source_status() != StatusCode::BadNotSupported + && v.target_status() != StatusCode::BadNotSupported + { + return false; + } + node_manager.owns_node(v.source_node_id()) + || node_manager.owns_node(&v.target_node_id().node_id) + }) + .collect(); + + if owned.is_empty() { + continue; + } + + if let Err(e) = node_manager.add_references(&context, &mut owned).await { + for node in owned { + if node_manager.owns_node(node.source_node_id()) { + node.set_source_result(e); + } + if node_manager.owns_node(&node.target_node_id().node_id) { + node.set_target_result(e); + } + } + } + } + + Response { + message: AddReferencesResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(to_add.into_iter().map(|r| r.result_status()).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn delete_nodes( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + + let nodes_to_delete = take_service_items!( + request, + request.request.nodes_to_delete, + request + .info + .operational_limits + .max_nodes_per_node_management + ); + + let mut to_delete: Vec<_> = nodes_to_delete + .into_iter() + 
.map(|v| DeleteNodeItem::new(v)) + .collect(); + + for (idx, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let mut owned: Vec<_> = to_delete + .iter_mut() + .filter(|it| { + it.status() == StatusCode::BadNodeIdUnknown && node_manager.owns_node(it.node_id()) + }) + .collect(); + + if owned.is_empty() { + continue; + } + + if let Err(e) = node_manager.delete_nodes(&context, &mut owned).await { + for node in owned { + node.set_result(e); + } + } + } + + // Then delete references where necessary. + for (idx, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let targets: Vec<_> = to_delete + .iter() + .filter(|it| it.status().is_good() && !node_manager.owns_node(it.node_id())) + .collect(); + + node_manager + .delete_node_references(&context, &targets) + .await; + } + + Response { + message: DeleteNodesResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(to_delete.into_iter().map(|r| r.status()).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn delete_references( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + + let references_to_delete = take_service_items!( + request, + request.request.references_to_delete, + request + .info + .operational_limits + .max_references_per_references_management + ); + + let mut to_delete: Vec<_> = references_to_delete + .into_iter() + .map(|it| DeleteReferenceItem::new(it)) + .collect(); + + for (idx, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let mut owned: Vec<_> = to_delete + .iter_mut() + .filter(|v| { + if v.source_status() != StatusCode::BadNotSupported + && v.target_status() != StatusCode::BadNotSupported + { + return false; + } + node_manager.owns_node(v.source_node_id()) + || 
node_manager.owns_node(&v.target_node_id().node_id) + }) + .collect(); + + if owned.is_empty() { + continue; + } + + if let Err(e) = node_manager.delete_references(&context, &mut owned).await { + for node in owned { + if node_manager.owns_node(node.source_node_id()) { + node.set_source_result(e); + } + if node_manager.owns_node(&node.target_node_id().node_id) { + node.set_target_result(e); + } + } + } + } + + Response { + message: DeleteReferencesResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(to_delete.into_iter().map(|r| r.result_status()).collect()), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} diff --git a/lib/src/server/session/services/query.rs b/lib/src/server/session/services/query.rs new file mode 100644 index 000000000..383eb8817 --- /dev/null +++ b/lib/src/server/session/services/query.rs @@ -0,0 +1,199 @@ +use crate::{ + server::{ + node_manager::{NodeManagers, ParsedNodeTypeDescription, QueryRequest}, + session::{controller::Response, message_handler::Request}, + ParsedContentFilter, + }, + types::{ + ByteString, QueryFirstRequest, QueryFirstResponse, QueryNextRequest, QueryNextResponse, + ResponseHeader, StatusCode, + }, +}; + +pub async fn query_first( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let node_types = take_service_items!( + request, + request.request.node_types, + request.info.operational_limits.max_node_descs_per_query + ); + let data_sets_limit = request.info.operational_limits.max_data_sets_query_return; + let references_limit = request.info.operational_limits.max_references_query_return; + let max_data_sets_to_return = if request.request.max_data_sets_to_return == 0 { + data_sets_limit + } else { + data_sets_limit.min(request.request.max_data_sets_to_return as usize) + }; + let max_references_to_return = if request.request.max_references_to_return == 0 { + references_limit + } else { + 
references_limit.min(request.request.max_references_to_return as usize) + }; + if !request.request.view.view_id.is_null() || !request.request.view.timestamp.is_null() { + info!("Browse request ignored because view was specified (views not supported)"); + return service_fault!(request, StatusCode::BadViewIdUnknown); + } + + let mut status_code = StatusCode::Good; + + let mut parsing_results = Vec::with_capacity(node_types.len()); + let mut final_node_types = Vec::with_capacity(node_types.len()); + for node_type in node_types { + let (res, parsed) = ParsedNodeTypeDescription::parse(node_type); + if let Ok(parsed) = parsed { + final_node_types.push(parsed); + } else { + status_code = StatusCode::BadInvalidArgument; + } + parsing_results.push(res); + } + + let (filter_result, filter) = { + let type_tree = trace_read_lock!(request.info.type_tree); + ParsedContentFilter::parse(request.request.filter, &type_tree, false, false) + }; + + let content_filter = match filter { + Ok(r) => r, + Err(e) => { + status_code = e; + ParsedContentFilter::empty() + } + }; + + if status_code.is_bad() { + return Response { + message: QueryFirstResponse { + response_header: ResponseHeader::new_service_result( + request.request_handle, + status_code, + ), + query_data_sets: None, + continuation_point: ByteString::null(), + parsing_results: Some(parsing_results), + filter_result, + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + }; + } + + let mut query_request = QueryRequest::new( + final_node_types, + content_filter, + max_data_sets_to_return, + max_references_to_return, + ); + + for (index, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = index; + // All node managers must succeed. Partial success is really + // hard to quantify for query... + // TODO: This is pretty much impossible to implement + // until we actually implement this in the core node manager. 
+ if let Err(e) = node_manager.query(&context, &mut query_request).await { + return Response { + message: QueryFirstResponse { + response_header: ResponseHeader::new_service_result(request.request_handle, e), + query_data_sets: None, + continuation_point: ByteString::null(), + parsing_results: Some(parsing_results), + filter_result, + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + }; + } + + if query_request.is_completed() { + break; + } + } + let (result, continuation_point, status) = { + let mut session = trace_write_lock!(request.session); + query_request.into_result( + context.current_node_manager_index, + node_managers.len(), + &mut session, + ) + }; + + Response { + message: QueryFirstResponse { + response_header: ResponseHeader::new_service_result(request.request_handle, status), + query_data_sets: Some(result), + continuation_point, + parsing_results: None, + diagnostic_infos: None, + filter_result, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn query_next( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let mut query_request = { + let mut session = trace_write_lock!(request.session); + let Some(p) = session.remove_query_continuation_point(&request.request.continuation_point) + else { + return service_fault!(request, StatusCode::BadContinuationPointInvalid); + }; + QueryRequest::from_continuation_point(p) + }; + + if request.request.release_continuation_point { + return Response { + message: QueryNextResponse { + response_header: ResponseHeader::new_good(request.request_handle), + query_data_sets: None, + revised_continuation_point: ByteString::null(), + } + .into(), + request_id: request.request_id, + }; + } + + for (index, node_manager) in node_managers.iter().enumerate() { + if index < query_request.node_manager_index() { + continue; + } + context.current_node_manager_index = index; + + if let Err(e) = node_manager.query(&context, &mut 
query_request).await { + return service_fault!(request, e); + } + + if query_request.is_completed() { + break; + } + } + + let (result, continuation_point, status) = { + let mut session = trace_write_lock!(request.session); + query_request.into_result( + context.current_node_manager_index, + node_managers.len(), + &mut session, + ) + }; + + Response { + message: QueryNextResponse { + response_header: ResponseHeader::new_service_result(request.request_handle, status), + query_data_sets: Some(result), + revised_continuation_point: continuation_point, + } + .into(), + request_id: request.request_id, + } +} diff --git a/lib/src/server/session/services/subscriptions.rs b/lib/src/server/session/services/subscriptions.rs new file mode 100644 index 000000000..e7cc587be --- /dev/null +++ b/lib/src/server/session/services/subscriptions.rs @@ -0,0 +1,68 @@ +use crate::{ + server::{ + node_manager::{NodeManagers, RequestContext}, + session::{controller::Response, message_handler::Request}, + SubscriptionCache, + }, + types::{DeleteSubscriptionsRequest, DeleteSubscriptionsResponse, ResponseHeader, StatusCode}, +}; + +pub async fn delete_subscriptions( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let items = take_service_items!( + request, + request.request.subscription_ids, + request.info.operational_limits.max_subscriptions_per_call + ); + + let results = match delete_subscriptions_inner( + node_managers, + items, + &request.subscriptions, + &mut context, + ) + .await + { + Ok(r) => r, + Err(e) => return service_fault!(request, e), + }; + + Response { + message: DeleteSubscriptionsResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn delete_subscriptions_inner( + node_managers: NodeManagers, + to_delete: Vec, + subscriptions: &SubscriptionCache, + context: &mut 
RequestContext, +) -> Result, StatusCode> { + let results = subscriptions.delete_subscriptions(context.session_id, &to_delete)?; + + for (idx, mgr) in node_managers.iter().enumerate() { + context.current_node_manager_index = idx; + let owned: Vec<_> = results + .iter() + .filter(|f| f.0.is_good()) + .flat_map(|f| f.1.iter().filter(|i| mgr.owns_node(i.node_id()))) + .collect(); + + if owned.is_empty() { + continue; + } + + mgr.delete_monitored_items(&context, &owned).await; + } + + Ok(results.into_iter().map(|r| r.0).collect()) +} diff --git a/lib/src/server/session/services/view.rs b/lib/src/server/session/services/view.rs new file mode 100644 index 000000000..9e384daba --- /dev/null +++ b/lib/src/server/session/services/view.rs @@ -0,0 +1,561 @@ +use std::collections::HashMap; + +use crate::{ + server::{ + node_manager::{ + resolve_external_references, BrowseNode, BrowsePathItem, ExternalReferencesContPoint, + NodeManagers, RegisterNodeItem, + }, + session::{controller::Response, message_handler::Request}, + }, + types::{ + BrowseNextRequest, BrowseNextResponse, BrowsePathResult, BrowsePathTarget, BrowseRequest, + BrowseResponse, BrowseResult, ByteString, RegisterNodesRequest, RegisterNodesResponse, + ResponseHeader, StatusCode, TranslateBrowsePathsToNodeIdsRequest, + TranslateBrowsePathsToNodeIdsResponse, UnregisterNodesRequest, UnregisterNodesResponse, + }, +}; + +pub async fn browse(node_managers: NodeManagers, request: Request) -> Response { + let mut context: crate::server::node_manager::RequestContext = request.context(); + let nodes_to_browse = take_service_items!( + request, + request.request.nodes_to_browse, + request.info.operational_limits.max_nodes_per_browse + ); + if !request.request.view.view_id.is_null() || !request.request.view.timestamp.is_null() { + info!("Browse request ignored because view was specified (views not supported)"); + return service_fault!(request, StatusCode::BadViewIdUnknown); + } + + let max_references_per_node = if 
request.request.requested_max_references_per_node == 0 { + request + .info + .operational_limits + .max_references_per_browse_node + } else { + request + .info + .operational_limits + .max_references_per_browse_node + .min(request.request.requested_max_references_per_node as usize) + }; + + let mut nodes: Vec<_> = nodes_to_browse + .into_iter() + .enumerate() + .map(|(idx, r)| BrowseNode::new(r, max_references_per_node, idx)) + .collect(); + + let mut results: Vec<_> = (0..nodes.len()).map(|_| None).collect(); + let node_manager_count = node_managers.len(); + + for (node_manager_index, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = node_manager_index; + + if let Err(e) = node_manager.browse(&context, &mut nodes).await { + for node in &mut nodes { + if node_manager.owns_node(&node.node_id()) { + node.set_status(e); + } + } + } + // Iterate over the current nodes, removing unfinished ones, and storing + // continuation points when relevant. + // This does not preserve ordering, for efficiency, so node managers should + // not rely on ordering at all. + // We store the input index to make sure the results are correctly ordered. + let mut i = 0; + let mut session = request.session.write(); + while let Some(n) = nodes.get(i) { + if n.is_completed() { + let (result, input_index) = nodes.swap_remove(i).into_result( + node_manager_index, + node_manager_count, + &mut session, + ); + results[input_index] = Some(result); + } else { + i += 1; + } + } + + if nodes.is_empty() { + break; + } + } + + // Process external references + + // Any remaining nodes may have an external ref continuation point, process these before proceeding. 
+ { + let type_tree = trace_read_lock!(context.type_tree); + for node in nodes.iter_mut() { + if let Some(mut p) = node.take_continuation_point::() { + while node.remaining() > 0 { + let Some(rf) = p.items.pop_front() else { + break; + }; + node.add(&type_tree, rf); + } + + if !p.items.is_empty() { + node.set_next_continuation_point(p); + } + } + } + } + + // Gather a unique list of all references + let mut external_refs = HashMap::new(); + for (rf, mask) in nodes + .iter() + .flat_map(|n| n.get_external_refs().map(|r| (r, n.result_mask()))) + { + // OR together the masks, so that if (for some reason) a user requests different + // masks for two nodes but they return a reference to the same node, we use the widest + // available mask... + external_refs + .entry(rf) + .and_modify(|m| *m |= mask) + .or_insert(mask); + } + + // Actually resolve the references + let external_refs: Vec<_> = external_refs.into_iter().collect(); + let node_meta = resolve_external_references(&context, &node_managers, &external_refs).await; + let node_map: HashMap<_, _> = node_meta + .iter() + .filter_map(|n| n.as_ref()) + .map(|n| (&n.node_id.node_id, n)) + .collect(); + + // Finally, process all remaining nodes, including external references + { + let mut session = request.session.write(); + let type_tree = trace_read_lock!(context.type_tree); + for mut node in nodes { + node.resolve_external_references(&type_tree, &node_map); + + let (result, input_index) = + node.into_result(node_manager_count - 1, node_manager_count, &mut session); + results[input_index] = Some(result); + } + } + + // Cannot be None here, since we are guaranteed to always empty out nodes. 
+ let results = results.into_iter().map(Option::unwrap).collect(); + + Response { + message: BrowseResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn browse_next( + node_managers: NodeManagers, + request: Request, +) -> Response { + let mut context = request.context(); + let nodes_to_browse = take_service_items!( + request, + request.request.continuation_points, + request.info.operational_limits.max_nodes_per_browse + ); + let mut results: Vec<_> = (0..nodes_to_browse.len()).map(|_| None).collect(); + + let mut nodes = { + let mut session = trace_write_lock!(request.session); + let mut nodes = Vec::with_capacity(nodes_to_browse.len()); + for (idx, point) in nodes_to_browse.into_iter().enumerate() { + let point = session.remove_browse_continuation_point(&point); + if let Some(point) = point { + nodes.push(BrowseNode::from_continuation_point(point, idx)); + } else { + results[idx] = Some(BrowseResult { + status_code: StatusCode::BadContinuationPointInvalid, + continuation_point: ByteString::null(), + references: None, + }); + } + } + nodes + }; + + let results = if request.request.release_continuation_points { + results + .into_iter() + .map(|r| { + r.unwrap_or_else(|| BrowseResult { + status_code: StatusCode::Good, + continuation_point: ByteString::null(), + references: None, + }) + }) + .collect() + } else { + let node_manager_count = node_managers.len(); + + let mut batch_nodes = Vec::with_capacity(nodes.len()); + + for (node_manager_index, node_manager) in node_managers.iter().enumerate() { + context.current_node_manager_index = node_manager_index; + let mut i = 0; + // Get all the nodes with a continuation point at the current node manager. + // We collect these as we iterate through the node managers. 
+ while let Some(n) = nodes.get(i) { + if n.start_node_manager == node_manager_index { + batch_nodes.push(nodes.swap_remove(i)); + } else { + i += 1; + } + } + + if let Err(e) = node_manager.browse(&context, &mut batch_nodes).await { + for node in &mut nodes { + if node_manager.owns_node(node.node_id()) { + node.set_status(e); + } + } + } + // Iterate over the current nodes, removing unfinished ones, and storing + // continuation points when relevant. + // This does not preserve ordering, for efficiency, so node managers should + // not rely on ordering at all. + // We store the input index to make sure the results are correctly ordered. + let mut i = 0; + let mut session = request.session.write(); + while let Some(n) = batch_nodes.get(i) { + if n.is_completed() { + let (result, input_index) = batch_nodes.swap_remove(i).into_result( + node_manager_index, + node_manager_count, + &mut session, + ); + results[input_index] = Some(result); + } else { + i += 1; + } + } + + if nodes.is_empty() && batch_nodes.is_empty() { + break; + } + } + + // Process external references + + // Any remaining nodes may have an external ref continuation point, process these before proceeding. + { + let type_tree = trace_read_lock!(context.type_tree); + for node in nodes.iter_mut() { + if let Some(mut p) = node.take_continuation_point::() { + while node.remaining() > 0 { + let Some(rf) = p.items.pop_front() else { + break; + }; + node.add(&type_tree, rf); + } + + if !p.items.is_empty() { + node.set_next_continuation_point(p); + } + } + } + } + + // Gather a unique list of all references + let mut external_refs = HashMap::new(); + for (rf, mask) in nodes + .iter() + .chain(batch_nodes.iter()) + .flat_map(|n| n.get_external_refs().map(|r| (r, n.result_mask()))) + { + // OR together the masks, so that if (for some reason) a user requests different + // masks for two nodes but they return a reference to the same node, we use the widest + // available mask... 
+ external_refs + .entry(rf) + .and_modify(|m| *m |= mask) + .or_insert(mask); + } + + // Actually resolve the references + let external_refs: Vec<_> = external_refs.into_iter().collect(); + let node_meta = resolve_external_references(&context, &node_managers, &external_refs).await; + let node_map: HashMap<_, _> = node_meta + .iter() + .filter_map(|n| n.as_ref()) + .map(|n| (&n.node_id.node_id, n)) + .collect(); + + // Finally, process all remaining nodes, including external references. + // This may still produce a continuation point, for external references. + { + let mut session = request.session.write(); + let type_tree = trace_read_lock!(context.type_tree); + for mut node in nodes.into_iter().chain(batch_nodes.into_iter()) { + node.resolve_external_references(&type_tree, &node_map); + + let (result, input_index) = + node.into_result(node_manager_count - 1, node_manager_count, &mut session); + results[input_index] = Some(result); + } + } + + // Cannot be None here, since we are guaranteed to always empty out nodes. + results.into_iter().map(Option::unwrap).collect() + }; + + Response { + message: BrowseNextResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn translate_browse_paths( + node_managers: NodeManagers, + request: Request, +) -> Response { + // - We're given a list of (NodeId, BrowsePath) pairs + // - For a node manager, ask them to explore the browse path, returning _all_ visited nodes in each layer. + // - This extends the list of (NodeId, BrowsePath) pairs, though each new node should have a shorter browse path. + // - We keep which node managers returned which nodes. Once every node manager has been asked about every + // returned node, the service is finished and we can collect all the node IDs in the bottom layer. 
+ + let mut context = request.context(); + let paths = take_service_items!( + request, + request.request.browse_paths, + request + .info + .operational_limits + .max_nodes_per_translate_browse_paths_to_node_ids + ); + + let mut items: Vec<_> = paths + .iter() + .enumerate() + .map(|(i, p)| BrowsePathItem::new_root(p, i)) + .collect(); + + let mut idx = 0; + let mut iteration = 1; + let mut any_new_items_in_iteration = false; + let mut final_results = Vec::new(); + loop { + let mgr = &node_managers[idx]; + let mut chunk: Vec<_> = items + .iter_mut() + .filter(|it| { + // Item has not yet been marked bad, meaning it failed to resolve somewhere it should. + it.status().is_good() + // Either it's from a previous node manager, + && (it.node_manager_index() < idx + // Or it's not from a later node manager in the previous iteration. + || it.node_manager_index() > idx + && it.iteration_number() == iteration - 1) + // Or it may be an external reference with an unmatched browse name. + && (!it.path().is_empty() || it.unmatched_browse_name().is_some() && mgr.owns_node(it.node_id())) + }) + .collect(); + context.current_node_manager_index = idx; + + if !chunk.is_empty() { + // Call translate on any of the target IDs. + if let Err(e) = mgr + .translate_browse_paths_to_node_ids(&context, &mut chunk) + .await + { + for n in &mut chunk { + if mgr.owns_node(n.node_id()) { + n.set_status(e); + } + } + } else { + let mut next = Vec::new(); + for n in &mut chunk { + let index = n.input_index(); + for el in n.results_mut().drain(..) 
{ + next.push((el, index)); + } + if n.path().is_empty() && n.unmatched_browse_name().is_none() { + final_results.push(n.clone()) + } + } + + for (n, input_index) in next { + let item = + BrowsePathItem::new(n, input_index, &items[input_index], idx, iteration); + if item.path().is_empty() && item.unmatched_browse_name().is_none() { + final_results.push(item); + } else { + any_new_items_in_iteration = true; + items.push(item); + } + } + } + } + + idx += 1; + if idx == node_managers.len() { + idx = 0; + iteration += 1; + if !any_new_items_in_iteration { + break; + } + any_new_items_in_iteration = false; + } + + idx = (idx + 1) % node_managers.len(); + } + // Collect all final paths. + let mut results: Vec<_> = items + .iter() + .take(paths.len()) + .map(|p| BrowsePathResult { + status_code: p.status(), + targets: Some(Vec::new()), + }) + .collect(); + + for res in final_results { + results[res.input_index()] + .targets + .as_mut() + .unwrap() + .push(BrowsePathTarget { + target_id: res.node.into(), + // External server references are not yet supported. 
+ remaining_path_index: u32::MAX, + }); + } + + for res in results.iter_mut() { + if res.targets.is_none() || res.targets.as_ref().is_some_and(|t| t.is_empty()) { + res.targets = None; + if res.status_code.is_good() { + res.status_code = StatusCode::BadNoMatch; + } + } + } + + Response { + message: TranslateBrowsePathsToNodeIdsResponse { + response_header: ResponseHeader::new_good(request.request_handle), + results: Some(results), + diagnostic_infos: None, + } + .into(), + request_id: request.request_id, + } +} + +pub async fn register_nodes( + node_managers: NodeManagers, + request: Request, +) -> Response { + let context = request.context(); + + let Some(nodes_to_register) = request.request.nodes_to_register else { + return service_fault!(request, StatusCode::BadNothingToDo); + }; + + if nodes_to_register.is_empty() { + return service_fault!(request, StatusCode::BadNothingToDo); + } + + if nodes_to_register.len() > request.info.operational_limits.max_nodes_per_register_nodes { + return service_fault!(request, StatusCode::BadTooManyOperations); + } + + let mut items: Vec<_> = nodes_to_register + .into_iter() + .map(|n| RegisterNodeItem::new(n)) + .collect(); + + for mgr in &node_managers { + let mut owned: Vec<_> = items + .iter_mut() + .filter(|n| mgr.owns_node(n.node_id())) + .collect(); + + if owned.is_empty() { + continue; + } + + // All errors are fatal in this case, node managers should avoid them. 
+ if let Err(e) = mgr.register_nodes(&context, &mut owned).await { + error!("Register nodes failed for node manager {}: {e}", mgr.name()); + return service_fault!(request, e); + } + } + + let registered_node_ids: Vec<_> = items.into_iter().filter_map(|n| n.into_result()).collect(); + + Response { + message: RegisterNodesResponse { + response_header: ResponseHeader::new_good(request.request_handle), + registered_node_ids: Some(registered_node_ids), + } + .into(), + request_id: request.request_id, + } +} + +pub async fn unregister_nodes( + node_managers: NodeManagers, + request: Request, +) -> Response { + let context = request.context(); + + let Some(nodes_to_unregister) = request.request.nodes_to_unregister else { + return service_fault!(request, StatusCode::BadNothingToDo); + }; + + if nodes_to_unregister.is_empty() { + return service_fault!(request, StatusCode::BadNothingToDo); + } + + if nodes_to_unregister.len() > request.info.operational_limits.max_nodes_per_register_nodes { + return service_fault!(request, StatusCode::BadTooManyOperations); + } + + for mgr in &node_managers { + let owned: Vec<_> = nodes_to_unregister + .iter() + .filter(|n| mgr.owns_node(n)) + .collect(); + + if owned.is_empty() { + continue; + } + + // All errors are fatal in this case, node managers should avoid them. 
+ if let Err(e) = mgr.unregister_nodes(&context, &owned).await { + error!( + "Unregister nodes failed for node manager {}: {e}", + mgr.name() + ); + return service_fault!(request, e); + } + } + + Response { + message: UnregisterNodesResponse { + response_header: ResponseHeader::new_good(request.request_handle), + } + .into(), + request_id: request.request_id, + } +} diff --git a/lib/src/server/session_diagnostics.rs b/lib/src/server/session_diagnostics.rs deleted file mode 100644 index 92f7cb4a7..000000000 --- a/lib/src/server/session_diagnostics.rs +++ /dev/null @@ -1,192 +0,0 @@ -use std::collections::HashMap; - -use crate::types::{node_ids::ObjectTypeId, service_types::ServiceCounterDataType}; - -use super::{ - address_space::{address_space::AddressSpace, object::ObjectBuilder}, - session::Session, -}; - -/// This object tracks session diagnostics for exposure through the address space - -#[derive(Default)] -pub(crate) struct SessionDiagnostics { - total_request_count: u32, - unauthorized_request_count: u32, - service_counters: HashMap<&'static str, ServiceCounterDataType>, -} - -impl SessionDiagnostics { - /// Registers a session object - pub(crate) fn register_session(&self, session: &Session, address_space: &mut AddressSpace) { - // TODO SessionDiagnosticsObjectType - - let session_id = session.session_id(); - debug!("register_session for session id {}", session_id); - - debug!("Adding an object node for the session id {}", session_id); - let _ = ObjectBuilder::new( - session_id, - format!("{}", session_id), - format!("{}", session_id), - ) - .has_type_definition(ObjectTypeId::SessionDiagnosticsObjectType) - .insert(address_space); - - // Now add variables - /* - 12816 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics), - 12817 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SessionId), - 12818 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SessionName), - 12819 => 
Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientDescription), - 12820 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ServerUri), - 12821 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_EndpointUrl), - 12822 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_LocaleIds), - 12823 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ActualSessionTimeout), - 12824 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_MaxResponseMessageSize), - 12825 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientConnectionTime), - 12826 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientLastContactTime), - 12827 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentSubscriptionsCount), - 12828 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentMonitoredItemsCount), - 12829 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentPublishRequestsInQueue), - - 12830 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TotalRequestCount), - 12831 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_UnauthorizedRequestCount), - - 12832 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ReadCount), - 12833 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_HistoryReadCount), - 12834 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_WriteCount), - 12835 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_HistoryUpdateCount), - 12836 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CallCount), - 12837 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CreateMonitoredItemsCount), - 12838 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ModifyMonitoredItemsCount), - 12839 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetMonitoringModeCount), - 12840 => 
Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetTriggeringCount), - 12841 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteMonitoredItemsCount), - 12842 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CreateSubscriptionCount), - 12843 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ModifySubscriptionCount), - 12844 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetPublishingModeCount), - 12845 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_PublishCount), - 12846 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_RepublishCount), - 12847 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TransferSubscriptionsCount), - 12848 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteSubscriptionsCount), - 12849 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_AddNodesCount), - 12850 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_AddReferencesCount), - 12851 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteNodesCount), - 12852 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteReferencesCount), - 12853 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_BrowseCount), - 12854 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_BrowseNextCount), - 12855 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TranslateBrowsePathsToNodeIdsCount), - 12856 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_QueryFirstCount), - 12857 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_QueryNextCount), - 12858 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_RegisterNodesCount), - 12859 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_UnregisterNodesCount), - */ - - // Browse name shall be session name - // session id is the nodeid - - // SessionDiagnostics - 
SessionDiagnosticsDataType - // SessionId - NodeId - // SessionName - String - // ClientDescription - Application Description - // ServerUri - String - // EndpointUrl - String - // LocaleId - LocaleId[] - // MaxResponseMessageSize - UInt32 - // ActualSessionTimeout - Duration - // ClientConnectionTime - UtcTime - // ClientLastContactTime - UtcTime - // CurrentSubscriptionsCount - UInt32 - // CurrentMonitoredItemsCount - UInt32 - // CurrentPublishRequestsInQueue - UInt32 - // TotalRequestCount - ServiceCounterData - // UnauthorizedRequestCount - UInt32 - // ReadCount - ServiceCounterData - // HistoryReadCount - ServiceCounterData - // WriteCount - ServiceCounterData - // HistoryUpdateCount - // SessionSecurityDiagnostics - SessionSecurityDiagnosticDataType - // SeubscriptionDiagnosticsArray - SubscriptionDiagnosticsArray - } - - /// Deregisters a session object - pub(crate) fn deregister_session(&self, session: &Session, address_space: &mut AddressSpace) { - address_space.delete(session.session_id(), true); - } - - /// Called on every request - pub(crate) fn request(&mut self) { - self.total_request_count += 1; - } - - /// Called on an authorized request - pub(crate) fn unauthorized_request(&mut self) { - self.unauthorized_request_count += 1; - self.total_request_count += 1; - } - - /// Fetches a snapshot of the current service counter value - pub(crate) fn service_counter( - &mut self, - diagnostic_key: &'static str, - ) -> ServiceCounterDataType { - if let Some(counter) = self.service_counters.get_mut(diagnostic_key) { - counter.clone() - } else { - ServiceCounterDataType::default() - } - } - - /// Increments the service counter for a successful service call - pub(crate) fn service_success(&mut self, diagnostic_key: &'static str) { - if let Some(counter) = self.service_counters.get_mut(diagnostic_key) { - counter.success(); - } else { - let mut counter = ServiceCounterDataType::default(); - counter.success(); - self.service_counters.insert(diagnostic_key, 
counter); - } - } - - /// Increments the service counter for a failed service call - pub(crate) fn service_error(&mut self, diagnostic_key: &'static str) { - if let Some(counter) = self.service_counters.get_mut(diagnostic_key) { - counter.error(); - } else { - let mut counter = ServiceCounterDataType::default(); - counter.error(); - self.service_counters.insert(diagnostic_key, counter); - } - } -} - -pub(crate) const READ_COUNT: &str = "ReadCount"; -pub(crate) const HISTORY_READ_COUNT: &str = "HistoryReadCount"; -pub(crate) const WRITE_COUNT: &str = "WriteCount"; -pub(crate) const HISTORY_UPDATE_COUNT: &str = "HistoryUpdateCount"; -pub(crate) const CALL_COUNT: &str = "CallCount"; -pub(crate) const CREATE_MONITORED_ITEMS_COUNT: &str = "CreateMonitoredItemsCount"; -pub(crate) const MODIFY_MONITORED_ITEMS_COUNT: &str = "ModifyMonitoredItemsCount"; -pub(crate) const SET_MONITORING_MODE_COUNT: &str = "SetMonitoringModeCount"; -pub(crate) const SET_TRIGGERING_COUNT: &str = "SetTriggeringCount"; -pub(crate) const DELETE_MONITORED_ITEMS_COUNT: &str = "DeleteMonitoredItemsCount"; -pub(crate) const CREATE_SUBSCRIPTION_COUNT: &str = "CreateSubscriptionCount"; -pub(crate) const MODIFY_SUBSCRIPTION_COUNT: &str = "ModifySubscriptionCount"; -pub(crate) const SET_PUBLISHING_MODE_COUNT: &str = "SetPublishingModeCount"; -//pub(crate) const PUBLISH_COUNT: &str = "PublishCount"; -pub(crate) const REPUBLISH_COUNT: &str = "RepublishCount"; -pub(crate) const TRANSFER_SUBSCRIPTIONS_COUNT: &str = "TransferSubscriptionsCount"; -pub(crate) const DELETE_SUBSCRIPTIONS_COUNT: &str = "DeleteSubscriptionsCount"; -pub(crate) const ADD_NODES_COUNT: &str = "AddNodesCount"; -pub(crate) const ADD_REFERENCES_COUNT: &str = "AddReferencesCount"; -pub(crate) const DELETE_NODES_COUNT: &str = "DeleteNodesCount"; -pub(crate) const DELETE_REFERENCES_COUNT: &str = "DeleteReferencesCount"; -pub(crate) const BROWSE_COUNT: &str = "BrowseCount"; -pub(crate) const BROWSE_NEXT_COUNT: &str = "BrowseNextCount"; 
-pub(crate) const TRANSLATE_BROWSE_PATHS_TO_NODE_IDS_COUNT: &str = - "TranslateBrowsePathsToNodeIdsCount"; -//pub(crate) const QUERY_FIRST_COUNT: &str = "QueryFirstCount"; -//pub(crate) const QUERY_NEXT_COUNT: &str = "QueryNextCount"; -pub(crate) const REGISTER_NODES_COUNT: &str = "RegisterNodesCount"; -pub(crate) const UNREGISTER_NODES_COUNT: &str = "UnregisterNodesCount"; diff --git a/lib/src/server/subscriptions/mod.rs b/lib/src/server/subscriptions/mod.rs index 2275fa3f9..a13fdd635 100644 --- a/lib/src/server/subscriptions/mod.rs +++ b/lib/src/server/subscriptions/mod.rs @@ -1,39 +1,787 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock +mod monitored_item; +mod session_subscriptions; +mod subscription; -use std::time::Duration; +use std::{sync::Arc, time::Instant}; -use crate::core::supported_message::SupportedMessage; -use crate::types::{service_types::PublishRequest, status_code::StatusCode}; +use chrono::Utc; +use hashbrown::{Equivalent, HashMap}; +pub use monitored_item::{CreateMonitoredItem, MonitoredItem}; +pub use session_subscriptions::SessionSubscriptions; +use subscription::TickReason; +pub use subscription::{MonitoredItemHandle, Subscription, SubscriptionState}; -/// The publish request entry preserves the request_id which is part of the chunk layer but clients -/// are fickle about receiving responses from the same as the request. Normally this is easy because -/// request and response are synchronous, but publish requests are async, so we preserve the request_id -/// so that later we can send out responses that have the proper req id -#[derive(Clone)] -pub struct PublishRequestEntry { - // The request id - pub request_id: u32, - // The request itself - pub request: PublishRequest, - // The result of clearing acknowledgments when the request was received. 
- pub results: Option>, +use crate::{ + core::SupportedMessage, + sync::{Mutex, RwLock}, + types::{ + AttributeId, CreateSubscriptionRequest, CreateSubscriptionResponse, DataValue, DateTimeUtc, + MessageSecurityMode, ModifySubscriptionRequest, ModifySubscriptionResponse, + MonitoredItemCreateResult, MonitoredItemModifyRequest, MonitoringMode, NodeId, + NotificationMessage, NumericRange, ObjectId, PublishRequest, QualifiedName, + RepublishRequest, RepublishResponse, ResponseHeader, SetPublishingModeRequest, + SetPublishingModeResponse, StatusCode, TimestampsToReturn, TransferResult, + TransferSubscriptionsRequest, TransferSubscriptionsResponse, + }, +}; + +use super::{ + authenticator::UserToken, + info::ServerInfo, + node_manager::{ + MonitoredItemRef, MonitoredItemUpdateRef, RequestContext, ServerContext, TypeTree, + }, + session::instance::Session, + Event, SubscriptionLimits, +}; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +struct MonitoredItemKey { + id: NodeId, + attribute_id: AttributeId, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +struct MonitoredItemKeyRef<'a> { + id: &'a NodeId, + attribute_id: AttributeId, +} + +impl<'a> Equivalent for MonitoredItemKeyRef<'a> { + fn equivalent(&self, key: &MonitoredItemKey) -> bool { + self.id == &key.id && self.attribute_id == key.attribute_id + } +} + +struct MonitoredItemEntry { + enabled: bool, + data_encoding: QualifiedName, + index_range: NumericRange, +} + +struct SubscriptionCacheInner { + /// Map from session ID to subscription cache + session_subscriptions: HashMap>>, + /// Map from subscription ID to session ID. + subscription_to_session: HashMap, + /// Map from notifier node ID to monitored item handles. + monitored_items: HashMap>, +} + +/// Structure storing all subscriptions and monitored items on the server. +/// Used to notify users of changes. +/// +/// Subscriptions can outlive sessions, and sessions can outlive connections, +/// so neither can be owned by the connection. 
This provides convenient methods for +/// manipulating subscriptions. +pub struct SubscriptionCache { + inner: RwLock, + /// Configured limits on subscriptions. + limits: SubscriptionLimits, +} + +impl SubscriptionCache { + pub(crate) fn new(limits: SubscriptionLimits) -> Self { + Self { + inner: RwLock::new(SubscriptionCacheInner { + session_subscriptions: HashMap::new(), + subscription_to_session: HashMap::new(), + monitored_items: HashMap::new(), + }), + limits, + } + } + + /// Get the `SessionSubscriptions` object for a single session by its numeric ID. + pub fn get_session_subscriptions( + &self, + session_id: u32, + ) -> Option>> { + let inner = trace_read_lock!(self.inner); + inner.session_subscriptions.get(&session_id).cloned() + } + + /// This is the periodic subscription tick where we check for + /// triggered subscriptions. + /// + pub(crate) async fn periodic_tick(&self, context: &ServerContext) { + // TODO: Look into replacing this with a smarter system, in theory it should be possible to + // always just sleep for the exact time until the next expired publish request, which could + // be more efficient, and would be more responsive. 
+ let mut to_delete = Vec::new(); + let mut items_to_delete = Vec::new(); + { + let now = Utc::now(); + let now_instant = Instant::now(); + let lck = trace_read_lock!(self.inner); + for (session_id, sub) in lck.session_subscriptions.iter() { + let mut sub_lck = sub.lock(); + items_to_delete.push(( + sub_lck.session().clone(), + sub_lck.tick(&now, now_instant, TickReason::TickTimerFired), + )); + if sub_lck.is_ready_to_delete() { + to_delete.push(*session_id); + } + } + } + if !to_delete.is_empty() { + let mut lck = trace_write_lock!(self.inner); + for id in to_delete { + lck.session_subscriptions.remove(&id); + } + } + if !items_to_delete.is_empty() { + Self::delete_expired_monitored_items(context, items_to_delete).await; + } + } + + async fn delete_expired_monitored_items( + context: &ServerContext, + items_to_delete: Vec<(Arc>, Vec)>, + ) { + for (session, items) in items_to_delete { + // Create a local request context, since we need to call delete monitored items. + + let (id, token) = { + let lck = session.read(); + let Some(token) = lck.user_token() else { + error!("Active session missing user token, this should be impossible"); + continue; + }; + + (lck.session_id_numeric(), token.clone()) + }; + let ctx = RequestContext { + session, + session_id: id, + authenticator: context.authenticator.clone(), + token, + current_node_manager_index: 0, + type_tree: context.type_tree.clone(), + subscriptions: context.subscriptions.clone(), + info: context.info.clone(), + }; + + for mgr in context.node_managers.iter() { + let owned: Vec<_> = items + .iter() + .filter(|n| mgr.owns_node(n.node_id())) + .collect(); + + if owned.is_empty() { + continue; + } + + mgr.delete_monitored_items(&ctx, &owned).await; + } + } + } + + pub(crate) fn get_monitored_item_count( + &self, + session_id: u32, + subscription_id: u32, + ) -> Option { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return None; + }; 
+ let cache_lck = cache.lock(); + cache_lck.get_monitored_item_count(subscription_id) + } + + pub(crate) fn create_subscription( + &self, + session_id: u32, + session: &Arc>, + request: &CreateSubscriptionRequest, + info: &ServerInfo, + ) -> Result { + let mut lck = trace_write_lock!(self.inner); + let cache = lck + .session_subscriptions + .entry(session_id) + .or_insert_with(|| { + Arc::new(Mutex::new(SessionSubscriptions::new( + self.limits, + Self::get_key(session), + session.clone(), + ))) + }) + .clone(); + let mut cache_lck = cache.lock(); + let res = cache_lck.create_subscription(request, info)?; + lck.subscription_to_session + .insert(res.subscription_id, session_id); + Ok(res) + } + + pub(crate) fn modify_subscription( + &self, + session_id: u32, + request: &ModifySubscriptionRequest, + info: &ServerInfo, + ) -> Result { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Err(StatusCode::BadNoSubscription); + }; + let mut cache_lck = cache.lock(); + cache_lck.modify_subscription(request, info) + } + + pub(crate) fn set_publishing_mode( + &self, + session_id: u32, + request: &SetPublishingModeRequest, + ) -> Result { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Err(StatusCode::BadNoSubscription); + }; + let mut cache_lck = cache.lock(); + cache_lck.set_publishing_mode(request) + } + + pub(crate) fn republish( + &self, + session_id: u32, + request: &RepublishRequest, + ) -> Result { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Err(StatusCode::BadNoSubscription); + }; + let cache_lck = cache.lock(); + cache_lck.republish(request) + } + + pub(crate) fn enqueue_publish_request( + &self, + session_id: u32, + now: &DateTimeUtc, + now_instant: Instant, + request: PendingPublish, + ) -> 
Result<(), StatusCode> { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Err(StatusCode::BadNoSubscription); + }; + + let mut cache_lck = cache.lock(); + cache_lck.enqueue_publish_request(now, now_instant, request); + Ok(()) + } + + /// Notify any listening clients about a list of data changes. + /// This can be called any time anything changes on the server, or only for values with + /// an existing monitored item. Either way this method will deal with distributing the values + /// to the appropriate monitored items. + pub fn notify_data_change<'a>( + &self, + items: impl Iterator, + ) { + let lck = trace_read_lock!(self.inner); + let mut by_subscription: HashMap> = HashMap::new(); + for (dv, node_id, attribute_id) in items { + // You can't subscribe to changes in EventNotifier, as subscribing to that value means + // subscribing to events. Intercept any updates here, for sanity. + if attribute_id == AttributeId::EventNotifier { + continue; + } + + let key = MonitoredItemKeyRef { + id: node_id, + attribute_id, + }; + let Some(items) = lck.monitored_items.get(&key) else { + continue; + }; + + for (handle, entry) in items { + if !entry.enabled { + continue; + } + by_subscription + .entry(handle.subscription_id) + .or_default() + .push((*handle, dv.clone())); + } + } + + for (sub_id, items) in by_subscription { + let Some(session_id) = lck.subscription_to_session.get(&sub_id) else { + continue; + }; + let Some(cache) = lck.session_subscriptions.get(session_id) else { + continue; + }; + let mut cache_lck = cache.lock(); + cache_lck.notify_data_changes(items); + } + } + + /// Notify with a dynamic sampler, to avoid getting values for nodes that + /// may not have monitored items. + /// This is potentially much more efficient than simply notifying blindly, but is + /// also somewhat harder to use. 
+ pub fn maybe_notify<'a>( + &self, + items: impl Iterator, + sample: impl Fn(&NodeId, AttributeId, &NumericRange, &QualifiedName) -> Option, + ) { + let lck = trace_read_lock!(self.inner); + let mut by_subscription: HashMap> = HashMap::new(); + for (node_id, attribute_id) in items { + if attribute_id == AttributeId::EventNotifier { + continue; + } + + let key = MonitoredItemKeyRef { + id: node_id, + attribute_id, + }; + let Some(items) = lck.monitored_items.get(&key) else { + continue; + }; + + for (handle, entry) in items { + if !entry.enabled { + continue; + } + let Some(dv) = sample( + node_id, + attribute_id, + &entry.index_range, + &entry.data_encoding, + ) else { + continue; + }; + by_subscription + .entry(handle.subscription_id) + .or_default() + .push((*handle, dv)); + } + } + + for (sub_id, items) in by_subscription { + let Some(session_id) = lck.subscription_to_session.get(&sub_id) else { + continue; + }; + let Some(cache) = lck.session_subscriptions.get(session_id) else { + continue; + }; + let mut cache_lck = cache.lock(); + cache_lck.notify_data_changes(items); + } + } + + /// Notify listening clients to events. Without a custom node manager implementing + /// event history, this is the only way to report events in the server. + pub fn notify_events<'a>(&self, items: impl Iterator) { + let lck = trace_read_lock!(self.inner); + let mut by_subscription = HashMap::>::new(); + for (evt, notifier) in items { + let notifier_key = MonitoredItemKeyRef { + id: ¬ifier, + attribute_id: AttributeId::EventNotifier, + }; + if let Some(items) = lck.monitored_items.get(¬ifier_key) { + for (handle, item) in items { + if !item.enabled { + continue; + } + by_subscription + .entry(handle.subscription_id) + .or_default() + .push((*handle, evt)); + } + } + // The server gets all notifications. 
+ let server_id: NodeId = ObjectId::Server.into(); + if notifier != &server_id { + let server_key = MonitoredItemKeyRef { + id: &server_id, + attribute_id: AttributeId::EventNotifier, + }; + let Some(items) = lck.monitored_items.get(&server_key) else { + continue; + }; + for (handle, item) in items { + if !item.enabled { + continue; + } + by_subscription + .entry(handle.subscription_id) + .or_default() + .push((*handle, evt)); + } + } + } + + for (sub_id, items) in by_subscription { + let Some(session_id) = lck.subscription_to_session.get(&sub_id) else { + continue; + }; + let Some(cache) = lck.session_subscriptions.get(session_id) else { + continue; + }; + let mut cache_lck = cache.lock(); + cache_lck.notify_events(items); + } + } + + pub(crate) fn create_monitored_items( + &self, + session_id: u32, + subscription_id: u32, + requests: &[CreateMonitoredItem], + ) -> Result, StatusCode> { + let mut lck = trace_write_lock!(self.inner); + let Some(cache) = lck.session_subscriptions.get(&session_id).cloned() else { + return Err(StatusCode::BadNoSubscription); + }; + + let mut cache_lck = cache.lock(); + let result = cache_lck.create_monitored_items(subscription_id, requests); + if let Ok(res) = &result { + for (create, res) in requests.iter().zip(res.iter()) { + if res.status_code.is_good() { + let key = MonitoredItemKey { + id: create.item_to_monitor().node_id.clone(), + attribute_id: create.item_to_monitor().attribute_id, + }; + + let index_range = create.item_to_monitor().index_range.clone(); + + lck.monitored_items.entry(key).or_default().insert( + create.handle(), + MonitoredItemEntry { + enabled: !matches!(create.monitoring_mode(), MonitoringMode::Disabled), + index_range, + data_encoding: create.item_to_monitor().data_encoding.clone(), + }, + ); + } + } + } + + result + } + + pub(crate) fn modify_monitored_items( + &self, + session_id: u32, + subscription_id: u32, + info: &ServerInfo, + timestamps_to_return: TimestampsToReturn, + requests: Vec, + type_tree: 
&TypeTree, + ) -> Result, StatusCode> { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Err(StatusCode::BadNoSubscription); + }; + + let mut cache_lck = cache.lock(); + cache_lck.modify_monitored_items( + subscription_id, + info, + timestamps_to_return, + requests, + type_tree, + ) + } + + fn get_key(session: &RwLock) -> PersistentSessionKey { + let lck = trace_read_lock!(session); + PersistentSessionKey::new( + lck.user_token().unwrap(), + lck.message_security_mode(), + &lck.application_description().application_uri.as_ref(), + ) + } + + pub(crate) fn set_monitoring_mode( + &self, + session_id: u32, + subscription_id: u32, + monitoring_mode: MonitoringMode, + items: Vec, + ) -> Result, StatusCode> { + let mut lck = trace_write_lock!(self.inner); + let Some(cache) = lck.session_subscriptions.get(&session_id).cloned() else { + return Err(StatusCode::BadNoSubscription); + }; + + let mut cache_lck = cache.lock(); + let result = cache_lck.set_monitoring_mode(subscription_id, monitoring_mode, items); + + if let Ok(res) = &result { + for (status, rf) in res { + if status.is_good() { + let key = MonitoredItemKeyRef { + id: rf.node_id(), + attribute_id: rf.attribute(), + }; + if let Some(it) = lck + .monitored_items + .get_mut(&key) + .and_then(|it| it.get_mut(&rf.handle())) + { + it.enabled = !matches!(monitoring_mode, MonitoringMode::Disabled); + } + } + } + } + result + } + + pub(crate) fn set_triggering( + &self, + session_id: u32, + subscription_id: u32, + triggering_item_id: u32, + links_to_add: Vec, + links_to_remove: Vec, + ) -> Result<(Vec, Vec), StatusCode> { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Err(StatusCode::BadNoSubscription); + }; + + let mut cache_lck = cache.lock(); + cache_lck.set_triggering( + subscription_id, + triggering_item_id, + links_to_add, + 
links_to_remove, + ) + } + + pub(crate) fn delete_monitored_items( + &self, + session_id: u32, + subscription_id: u32, + items: &[u32], + ) -> Result, StatusCode> { + let mut lck = trace_write_lock!(self.inner); + let Some(cache) = lck.session_subscriptions.get(&session_id).cloned() else { + return Err(StatusCode::BadNoSubscription); + }; + + let mut cache_lck = cache.lock(); + let result = cache_lck.delete_monitored_items(subscription_id, items); + if let Ok(res) = &result { + for (status, rf) in res { + if status.is_good() { + let key = MonitoredItemKeyRef { + id: rf.node_id(), + attribute_id: rf.attribute(), + }; + if let Some(it) = lck.monitored_items.get_mut(&key) { + it.remove(&rf.handle()); + } + } + } + } + result + } + + pub(crate) fn delete_subscriptions( + &self, + session_id: u32, + ids: &[u32], + ) -> Result)>, StatusCode> { + let mut lck = trace_write_lock!(self.inner); + let Some(cache) = lck.session_subscriptions.get(&session_id).cloned() else { + return Err(StatusCode::BadNoSubscription); + }; + let mut cache_lck = cache.lock(); + for id in ids { + if cache_lck.contains(*id) { + lck.subscription_to_session.remove(id); + } + } + let result = cache_lck.delete_subscriptions(ids); + + for (status, item_res) in &result { + if !status.is_good() { + continue; + } + for rf in item_res { + if rf.attribute() == AttributeId::EventNotifier { + let key = MonitoredItemKeyRef { + id: rf.node_id(), + attribute_id: rf.attribute(), + }; + if let Some(it) = lck.monitored_items.get_mut(&key) { + it.remove(&rf.handle()); + } + } + } + } + + Ok(result) + } + + pub(crate) fn get_session_subscription_ids(&self, session_id: u32) -> Vec { + let Some(cache) = ({ + let lck = trace_read_lock!(self.inner); + lck.session_subscriptions.get(&session_id).cloned() + }) else { + return Vec::new(); + }; + + let cache_lck = cache.lock(); + cache_lck.subscription_ids() + } + + pub(crate) fn transfer( + &self, + req: &TransferSubscriptionsRequest, + session_id: u32, + session: &Arc>, + ) 
-> TransferSubscriptionsResponse { + let mut results: Vec<_> = req + .subscription_ids + .iter() + .flatten() + .map(|id| { + ( + *id, + TransferResult { + status_code: StatusCode::BadSubscriptionIdInvalid, + available_sequence_numbers: None, + }, + ) + }) + .collect(); + + let key = Self::get_key(session); + { + let mut lck = trace_write_lock!(self.inner); + let session_subs = lck + .session_subscriptions + .entry(session_id) + .or_insert_with(|| { + Arc::new(Mutex::new(SessionSubscriptions::new( + self.limits, + key.clone(), + session.clone(), + ))) + }) + .clone(); + let mut session_subs_lck = session_subs.lock(); + + for (sub_id, res) in &mut results { + let Some(inner_session_id) = lck.subscription_to_session.get(sub_id) else { + continue; + }; + if session_id == *inner_session_id { + res.status_code = StatusCode::Good; + res.available_sequence_numbers = + session_subs_lck.available_sequence_numbers(*sub_id); + continue; + } + + let Some(session_cache) = lck.session_subscriptions.get(inner_session_id).cloned() + else { + continue; + }; + + let mut session_lck = session_cache.lock(); + + if !session_lck.user_token().is_equivalent_for_transfer(&key) { + res.status_code = StatusCode::BadUserAccessDenied; + continue; + } + + if let (Some(sub), notifs) = session_lck.remove(*sub_id) { + res.status_code = StatusCode::Good; + res.available_sequence_numbers = + Some(notifs.iter().map(|n| n.message.sequence_number).collect()); + + if let Err((e, sub, notifs)) = session_subs_lck.insert(sub, notifs) { + res.status_code = e; + let _ = session_lck.insert(sub, notifs); + } else { + if req.send_initial_values { + if let Some(sub) = session_subs_lck.get_mut(*sub_id) { + sub.set_resend_data(); + } + } + lck.subscription_to_session.insert(*sub_id, session_id); + } + } + } + } + + TransferSubscriptionsResponse { + response_header: ResponseHeader::new_good(&req.request_header), + results: Some(results.into_iter().map(|r| r.1).collect()), + diagnostic_infos: None, + } + } } 
-#[derive(Clone, Debug)] -pub struct PublishResponseEntry { - pub request_id: u32, - pub response: SupportedMessage, +pub(crate) struct PendingPublish { + pub response: tokio::sync::oneshot::Sender, + pub request: Box, + pub ack_results: Option>, + pub deadline: Instant, } -/// This converts an OPC UA Duration into a time duration used for testing for interval elapsed -fn duration_from_ms(d: f64) -> Duration { - // Duration is a floating point number in millis so turn to microseconds for greater accuracy - // 1 millisecond = 1000 microsecond - Duration::from_micros((d * 1000f64) as u64) +struct NonAckedPublish { + message: NotificationMessage, + subscription_id: u32, } -pub mod monitored_item; -pub mod subscription; -pub mod subscriptions; +#[derive(Debug, Clone)] +struct PersistentSessionKey { + token: UserToken, + security_mode: MessageSecurityMode, + application_uri: String, +} + +impl PersistentSessionKey { + pub fn new( + token: &UserToken, + security_mode: MessageSecurityMode, + application_uri: &str, + ) -> Self { + Self { + token: token.clone(), + security_mode, + application_uri: application_uri.to_owned(), + } + } + + pub fn is_equivalent_for_transfer(&self, other: &PersistentSessionKey) -> bool { + if self.token.is_anonymous() { + other.token.is_anonymous() + && matches!( + other.security_mode, + MessageSecurityMode::Sign | MessageSecurityMode::SignAndEncrypt + ) + && self.application_uri == other.application_uri + } else { + other.token == self.token + } + } +} diff --git a/lib/src/server/subscriptions/monitored_item.rs b/lib/src/server/subscriptions/monitored_item.rs index f5ff9a7ee..6d99cbfa5 100644 --- a/lib/src/server/subscriptions/monitored_item.rs +++ b/lib/src/server/subscriptions/monitored_item.rs @@ -1,25 +1,20 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - use std::collections::{BTreeSet, VecDeque}; -use std::result::Result; -use crate::types::{ - node_ids::ObjectId, - service_types::{ - 
DataChangeFilter, EventFieldList, EventFilter, MonitoredItemCreateRequest, - MonitoredItemModifyRequest, MonitoredItemNotification, ReadValueId, TimestampsToReturn, +use crate::{ + server::{ + info::ServerInfo, + node_manager::{ParsedReadValueId, TypeTree}, + Event, ParsedEventFilter, + }, + types::{ + DataChangeFilter, DataValue, DateTime, DecodingOptions, EventFieldList, EventFilter, + EventFilterResult, ExtensionObject, MonitoredItemCreateRequest, MonitoredItemModifyRequest, + MonitoredItemNotification, MonitoringMode, NumericRange, ObjectId, StatusCode, + TimestampsToReturn, Variant, }, - status_code::StatusCode, - *, }; -use crate::server::{ - address_space::{node::Node, AddressSpace, EventNotifier}, - events::event_filter, - state::ServerState, -}; +use super::MonitoredItemHandle; #[derive(Debug, Clone, PartialEq, Serialize)] pub enum Notification { @@ -39,39 +34,50 @@ impl From for Notification { } } -#[derive(Debug, Clone, PartialEq, Serialize)] -pub(crate) enum FilterType { +#[derive(Debug, Clone)] +/// Parsed filter type for a monitored item. +pub enum FilterType { None, DataChangeFilter(DataChangeFilter), - EventFilter(EventFilter), + EventFilter(ParsedEventFilter), } impl FilterType { + /// Try to create a filter from an extension object, returning + /// an `EventFilterResult` if the filter is for events. 
pub fn from_filter( filter: &ExtensionObject, decoding_options: &DecodingOptions, - ) -> Result { + type_tree: &TypeTree, + ) -> (Option, Result) { // Check if the filter is a supported filter type let filter_type_id = &filter.node_id; if filter_type_id.is_null() { // No data filter was passed, so just a dumb value comparison - Ok(FilterType::None) + (None, Ok(FilterType::None)) } else if let Ok(filter_type_id) = filter_type_id.as_object_id() { match filter_type_id { - ObjectId::DataChangeFilter_Encoding_DefaultBinary => { - Ok(FilterType::DataChangeFilter( - filter.decode_inner::(decoding_options)?, - )) + ObjectId::DataChangeFilter_Encoding_DefaultBinary => ( + None, + filter + .decode_inner::(decoding_options) + .map(FilterType::DataChangeFilter), + ), + ObjectId::EventFilter_Encoding_DefaultBinary => { + let r = filter.decode_inner::(decoding_options); + let raw_filter = match r { + Ok(filter) => filter, + Err(e) => return (None, Err(e)), + }; + let (res, filter_res) = ParsedEventFilter::new(raw_filter, type_tree); + (Some(res), filter_res.map(FilterType::EventFilter)) } - ObjectId::EventFilter_Encoding_DefaultBinary => Ok(FilterType::EventFilter( - filter.decode_inner::(decoding_options)?, - )), _ => { error!( "Requested data filter type is not supported, {:?}", filter_type_id ); - Err(StatusCode::BadFilterNotAllowed) + (None, Err(StatusCode::BadFilterNotAllowed)) } } } else { @@ -79,547 +85,835 @@ impl FilterType { "Requested data filter type is not an object id, {:?}", filter_type_id ); - Err(StatusCode::BadFilterNotAllowed) + (None, Err(StatusCode::BadFilterNotAllowed)) } } } -#[derive(Debug, Clone, PartialEq, Serialize)] -pub(crate) struct MonitoredItem { - monitored_item_id: u32, - item_to_monitor: ReadValueId, +#[derive(Debug)] +/// Container for a request to create a single monitored item. 
+pub struct CreateMonitoredItem { + id: u32, + subscription_id: u32, + item_to_monitor: ParsedReadValueId, + monitoring_mode: MonitoringMode, + client_handle: u32, + discard_oldest: bool, + queue_size: usize, + sampling_interval: f64, + initial_value: Option, + status_code: StatusCode, + filter: FilterType, + filter_res: Option, + timestamps_to_return: TimestampsToReturn, +} + +/// Takes the requested sampling interval value supplied by client and ensures it is within +/// the range supported by the server +fn sanitize_sampling_interval(info: &ServerInfo, requested_sampling_interval: f64) -> f64 { + if requested_sampling_interval < 0.0 { + // From spec "any negative number is interpreted as -1" + // -1 means monitored item's sampling interval defaults to the subscription's publishing interval + -1.0 + } else if requested_sampling_interval == 0.0 + || requested_sampling_interval < info.config.limits.subscriptions.min_sampling_interval_ms + { + info.config.limits.subscriptions.min_sampling_interval_ms + } else { + requested_sampling_interval + } +} + +/// Takes the requested queue size and ensures it is within the range supported by the server +fn sanitize_queue_size(info: &ServerInfo, requested_queue_size: usize) -> usize { + if requested_queue_size == 0 || requested_queue_size == 1 { + // For data monitored items 0 -> 1 + // Future - for event monitored items, queue size should be the default queue size for event notifications + 1 + // Future - for event monitored items, the minimum queue size the server requires for event notifications + } else if requested_queue_size + > info + .config + .limits + .subscriptions + .max_monitored_item_queue_size + { + info.config + .limits + .subscriptions + .max_monitored_item_queue_size + // Future - for event monitored items MaxUInt32 returns the maximum queue size the server support + // for event notifications + } else { + requested_queue_size + } +} + +impl CreateMonitoredItem { + pub(crate) fn new( + req: 
MonitoredItemCreateRequest, + id: u32, + sub_id: u32, + info: &ServerInfo, + timestamps_to_return: TimestampsToReturn, + type_tree: &TypeTree, + ) -> Self { + let (filter_res, filter) = FilterType::from_filter( + &req.requested_parameters.filter, + &info.decoding_options(), + type_tree, + ); + let sampling_interval = + sanitize_sampling_interval(info, req.requested_parameters.sampling_interval); + let queue_size = sanitize_queue_size(info, req.requested_parameters.queue_size as usize); + + let (filter, mut status) = match filter { + Ok(s) => (s, StatusCode::BadNodeIdUnknown), + Err(e) => (FilterType::None, e), + }; + + let item_to_monitor = match ParsedReadValueId::parse(req.item_to_monitor) { + Ok(r) => r, + Err(e) => { + status = e; + ParsedReadValueId::null() + } + }; + + Self { + id, + subscription_id: sub_id, + item_to_monitor, + monitoring_mode: req.monitoring_mode, + client_handle: req.requested_parameters.client_handle, + discard_oldest: req.requested_parameters.discard_oldest, + queue_size, + sampling_interval, + initial_value: None, + status_code: status, + filter, + timestamps_to_return, + filter_res, + } + } + + /// Get the monitored item handle of this create request. + pub fn handle(&self) -> MonitoredItemHandle { + MonitoredItemHandle { + monitored_item_id: self.id, + subscription_id: self.subscription_id, + } + } + + /// Set the initial value of the monitored item. + pub fn set_initial_value(&mut self, value: DataValue) { + self.initial_value = Some(value); + } + + /// Set the status of the monitored item create request. + /// If this is an error after all node managers have been evulated, the + /// monitored item will not be created on the server. + /// + /// Note: Only consider a monitored item to be created if this is set to a + /// `Good` status code. + pub fn set_status(&mut self, status: StatusCode) { + self.status_code = status; + } + + /// Attribute to monitor. 
+ pub fn item_to_monitor(&self) -> &ParsedReadValueId { + &self.item_to_monitor + } + + /// Requested monitoring mode. + pub fn monitoring_mode(&self) -> MonitoringMode { + self.monitoring_mode + } + + /// Requested sampling interval in milliseconds. + pub fn sampling_interval(&self) -> f64 { + self.sampling_interval + } + + /// Requested queue size. + pub fn queue_size(&self) -> usize { + self.queue_size + } + + /// Requested filter type. + pub fn filter(&self) -> &FilterType { + &self.filter + } + + /// Revise the queue size, setting it equal to the given `queue_size` if it is smaller + /// or if the requested queue size is 0. + pub fn revise_queue_size(&mut self, queue_size: usize) { + if queue_size < self.queue_size && queue_size > 0 || self.queue_size == 0 { + self.queue_size = queue_size; + } + } + + /// Revise the sampling interval, settign it equal to the given `sampling_interval` if + /// it is larger. + pub fn revise_sampling_interval(&mut self, sampling_interval: f64) { + if sampling_interval < self.sampling_interval && sampling_interval > 0.0 + || self.sampling_interval == 0.0 + { + self.sampling_interval = sampling_interval; + } + } + + /// Requested timestamps to return. + pub fn timestamps_to_return(&self) -> TimestampsToReturn { + self.timestamps_to_return + } + + /// Get the current result status code. + pub fn status_code(&self) -> StatusCode { + self.status_code + } + + pub(crate) fn filter_res(&self) -> Option<&EventFilterResult> { + self.filter_res.as_ref() + } +} + +#[derive(Debug)] +/// State of an active monitored item on the server. +pub struct MonitoredItem { + id: u32, + item_to_monitor: ParsedReadValueId, monitoring_mode: MonitoringMode, // Triggered items are other monitored items in the same subscription which are reported if this // monitored item changes. 
triggered_items: BTreeSet, client_handle: u32, - sampling_interval: Duration, + sampling_interval: f64, filter: FilterType, discard_oldest: bool, queue_size: usize, - /// The notification queue is arranged from oldest to newest, i.e. pop front gets the oldest - /// message, pop back gets the most recent. notification_queue: VecDeque, queue_overflow: bool, timestamps_to_return: TimestampsToReturn, - last_sample_time: DateTimeUtc, last_data_value: Option, -} - -#[derive(Debug, Clone, PartialEq)] -pub(crate) enum TickResult { - /// The value changed and it should be reported - ReportValueChanged, - /// The value changed and it should not be reported (sampling) - ValueChanged, - /// The value did not change - NoChange, + any_new_notification: bool, } impl MonitoredItem { - pub fn new( - now: &DateTimeUtc, - monitored_item_id: u32, - timestamps_to_return: TimestampsToReturn, - server_state: &ServerState, - request: &MonitoredItemCreateRequest, - ) -> Result { - let filter = FilterType::from_filter( - &request.requested_parameters.filter, - &server_state.decoding_options(), - )?; - let sampling_interval = Self::sanitize_sampling_interval( - server_state, - request.requested_parameters.sampling_interval, - ); - let queue_size = Self::sanitize_queue_size( - server_state, - request.requested_parameters.queue_size as usize, - ); - Ok(MonitoredItem { - monitored_item_id, + pub(super) fn new(request: &CreateMonitoredItem) -> Self { + let mut v = Self { + id: request.id, item_to_monitor: request.item_to_monitor.clone(), monitoring_mode: request.monitoring_mode, triggered_items: BTreeSet::new(), - client_handle: request.requested_parameters.client_handle, - sampling_interval, - filter, - discard_oldest: request.requested_parameters.discard_oldest, - timestamps_to_return, - last_sample_time: *now, + client_handle: request.client_handle, + sampling_interval: request.sampling_interval, + filter: request.filter.clone(), + discard_oldest: request.discard_oldest, + 
timestamps_to_return: request.timestamps_to_return, last_data_value: None, - queue_size, - notification_queue: VecDeque::with_capacity(queue_size), + queue_size: request.queue_size, + notification_queue: VecDeque::new(), queue_overflow: false, - }) + any_new_notification: false, + }; + if let Some(val) = request.initial_value.as_ref() { + v.notify_data_value(val.clone()); + } else { + let now = DateTime::now(); + v.notify_data_value(DataValue { + value: Some(Variant::Empty), + status: Some(StatusCode::BadWaitingForInitialData), + source_timestamp: Some(now), + source_picoseconds: None, + server_timestamp: Some(now), + server_picoseconds: None, + }); + } + v } /// Modifies the existing item with the values of the modify request. On success, the result /// holds the filter result. - pub fn modify( + pub(super) fn modify( &mut self, - server_state: &ServerState, - address_space: &AddressSpace, + info: &ServerInfo, timestamps_to_return: TimestampsToReturn, request: &MonitoredItemModifyRequest, - ) -> Result { + type_tree: &TypeTree, + ) -> (Option, StatusCode) { self.timestamps_to_return = timestamps_to_return; - self.filter = FilterType::from_filter( + let (filter_res, filter) = FilterType::from_filter( &request.requested_parameters.filter, - &server_state.decoding_options(), - )?; - self.sampling_interval = Self::sanitize_sampling_interval( - server_state, - request.requested_parameters.sampling_interval, - ); - self.queue_size = Self::sanitize_queue_size( - server_state, - request.requested_parameters.queue_size as usize, + &info.decoding_options(), + type_tree, ); + self.filter = match filter { + Ok(f) => f, + Err(e) => return (filter_res, e), + }; + self.sampling_interval = + sanitize_sampling_interval(info, request.requested_parameters.sampling_interval); + self.queue_size = + sanitize_queue_size(info, request.requested_parameters.queue_size as usize); self.client_handle = request.requested_parameters.client_handle; self.discard_oldest = 
request.requested_parameters.discard_oldest; // Shrink / grow the notification queue to the new threshold if self.notification_queue.len() > self.queue_size { // Discard old notifications - let discard = self.queue_size - self.notification_queue.len(); - let _ = self.notification_queue.drain(0..discard); - // TODO potential edge case with discard oldest behaviour + let discard = self.notification_queue.len() - self.queue_size; + for _ in 0..discard { + if self.discard_oldest { + let _ = self.notification_queue.pop_back(); + } else { + let _ = self.notification_queue.pop_front(); + } + } // Shrink the queue self.notification_queue.shrink_to_fit(); - } else if self.notification_queue.capacity() < self.queue_size { - // Reserve space for more elements - let extra_capacity = self.queue_size - self.notification_queue.capacity(); - self.notification_queue.reserve(extra_capacity); } - // Validate the filter, return that from this function - self.validate_filter(address_space) + (filter_res, StatusCode::Good) } - /// Adds or removes other monitored items which will be triggered when this monitored item changes - pub fn set_triggering(&mut self, items_to_add: &[u32], items_to_remove: &[u32]) { - // Spec says to process remove items before adding new ones. - items_to_remove.iter().for_each(|i| { - self.triggered_items.remove(i); - }); - items_to_add.iter().for_each(|i| { - self.triggered_items.insert(*i); - }); - } - - /// Validates the filter associated with the monitored item and returns the filter result - /// encoded in an extension object. 
- pub fn validate_filter( - &self, - address_space: &AddressSpace, - ) -> Result { - // Event filter must be validated - let filter_result = if let FilterType::EventFilter(ref event_filter) = self.filter { - let filter_result = event_filter::validate(event_filter, address_space)?; - ExtensionObject::from_encodable( - ObjectId::EventFilterResult_Encoding_DefaultBinary, - &filter_result, - ) - } else { - // DataChangeFilter has no result - ExtensionObject::null() + fn filter_by_sampling_interval(&self, old: &DataValue, new: &DataValue) -> bool { + let (Some(old), Some(new)) = (&old.source_timestamp, &new.source_timestamp) else { + // Always include measurements without source timestamp, we don't know enough about these, + // assume the server implementation did filtering elsewhere. + return true; }; - Ok(filter_result) + + let elapsed = new + .as_chrono() + .signed_duration_since(old.as_chrono()) + .to_std() + .unwrap(); + let sampling_interval = + std::time::Duration::from_micros((self.sampling_interval * 1000f64) as u64); + elapsed >= sampling_interval } - /// Called repeatedly on the monitored item. - /// - /// If the monitored item has a negative interval and subscription interval has elapsed, - /// the value is tested immediately. Otherwise, the monitored items sampling interval is enforced - /// the subscriptions and controls the rate. - /// - /// Function returns a `TickResult` denoting if the value changed or not, and whether it should - /// be reported. 
- pub fn tick( - &mut self, - now: &DateTimeUtc, - address_space: &AddressSpace, - publishing_interval_elapsed: bool, - resend_data: bool, - ) -> TickResult { + pub(super) fn notify_data_value(&mut self, mut value: DataValue) -> bool { if self.monitoring_mode == MonitoringMode::Disabled { - TickResult::NoChange - } else { - let check_value = if resend_data { - // Always check for resend_data flag - true - } else if self.sampling_interval < 0f64 { - // -1 means use the subscription publishing interval so if the publishing interval elapsed, - // then this monitored item is evaluated otherwise it won't be. - publishing_interval_elapsed - } else if self.sampling_interval == 0f64 { - // 0 means fastest practical rate, i.e. the tick quantum itself - // 0 is also used for clients subscribing for events. - true - } else { - // Compare sample interval to the time elapsed - let sampling_interval = super::duration_from_ms(self.sampling_interval); - let elapsed = now - .signed_duration_since(self.last_sample_time) - .to_std() - .unwrap(); - elapsed >= sampling_interval - }; - - // Test the value (or don't) - let value_changed = check_value && { - // Indicate a change if reporting is enabled - let first_tick = !self.is_event_filter() && self.last_data_value.is_none(); - let value_changed = self.check_value(address_space, now, resend_data); - first_tick || value_changed || !self.notification_queue.is_empty() - }; + return false; + } - if value_changed { - if self.monitoring_mode == MonitoringMode::Reporting { - TickResult::ReportValueChanged - } else { - TickResult::ValueChanged + if !matches!(self.item_to_monitor.index_range, NumericRange::None) { + if let Some(v) = value.value { + match v.range_of(self.item_to_monitor.index_range.clone()) { + Ok(r) => value.value = Some(r), + Err(e) => { + value.status = Some(e); + value.value = Some(Variant::Empty); + } } - } else { - TickResult::NoChange } } - } - /// Gets the event notifier bits for a node, or empty if there are no bits - 
fn get_event_notifier(node: &dyn Node) -> EventNotifier { - if let Some(v) = node.get_attribute( - TimestampsToReturn::Neither, - AttributeId::EventNotifier, - NumericRange::None, - &QualifiedName::null(), - ) { - if let Variant::Byte(v) = v.value.unwrap_or_else(|| 0u8.into()) { - EventNotifier::from_bits_truncate(v) - } else { - EventNotifier::empty() + let data_change = match (&self.last_data_value, &self.filter) { + (Some(last_dv), FilterType::DataChangeFilter(filter)) => { + !filter.compare(&value, last_dv, None) + && self.filter_by_sampling_interval(last_dv, &value) } - } else { - EventNotifier::empty() - } - } - - /// Check for - fn check_for_events( - &mut self, - address_space: &AddressSpace, - happened_since: &DateTimeUtc, - node: &dyn Node, - ) -> bool { - match self.filter { - FilterType::EventFilter(ref filter) => { - // Node has to allow subscribe to events - if Self::get_event_notifier(node).contains(EventNotifier::SUBSCRIBE_TO_EVENTS) { - let object_id = node.node_id(); - if let Some(events) = event_filter::evaluate( - &object_id, - filter, - address_space, - happened_since, - self.client_handle, - ) { - events - .into_iter() - .for_each(|event| self.enqueue_notification_message(event)); - true - } else { - false - } - } else { - false - } + (Some(last_dv), FilterType::None) => { + value.value != last_dv.value && self.filter_by_sampling_interval(last_dv, &value) } - _ => panic!(), - } - } + (None, _) => true, + _ => false, + }; - fn check_for_data_change( - &mut self, - _address_space: &AddressSpace, - resend_data: bool, - attribute_id: AttributeId, - node: &dyn Node, - ) -> bool { - let data_value = node.get_attribute( - TimestampsToReturn::Neither, - attribute_id, - NumericRange::None, - &QualifiedName::null(), - ); - if let Some(mut data_value) = data_value { - // Test for data change - let data_change = if resend_data { - true - } else if let Some(ref last_data_value) = self.last_data_value { - // If there is a filter on the monitored item then 
the filter determines - // if the value is considered to have changed, otherwise it is a straight - // equality test. - match self.filter { - FilterType::None => data_value.value != last_data_value.value, - FilterType::DataChangeFilter(ref filter) => { - !filter.compare(&data_value, last_data_value, None) - } - _ => { - // Unrecognized filter - false - } - } - } else { - // There is no previous data value so yes consider it changed - trace!( - "No last data value so item has changed, node {:?}", - self.item_to_monitor.node_id - ); - true - }; - if data_change { - trace!( - "Data change on item -, node {:?}, data_value = {:?}", - self.item_to_monitor.node_id, - data_value - ); - - // Store current data value to compare against on the next tick - self.last_data_value = Some(data_value.clone()); - - // Strip out timestamps that subscriber is not interested in - match self.timestamps_to_return { - TimestampsToReturn::Neither | TimestampsToReturn::Invalid => { - data_value.source_timestamp = None; - data_value.source_picoseconds = None; - data_value.server_timestamp = None; - data_value.server_picoseconds = None - } - TimestampsToReturn::Server => { - data_value.source_timestamp = None; - data_value.source_picoseconds = None; - } - TimestampsToReturn::Source => { - data_value.server_timestamp = None; - data_value.server_picoseconds = None - } - TimestampsToReturn::Both => { - // DO NOTHING - } - } + if !data_change { + return false; + } - // Enqueue notification message - let client_handle = self.client_handle; - self.enqueue_notification_message(MonitoredItemNotification { - client_handle, - value: data_value, - }); + self.last_data_value = Some(value.clone()); - trace!("Monitored item state = {:?}", self); - } else { - trace!( - "No data change on item, node {:?}", - self.item_to_monitor.node_id - ); + match self.timestamps_to_return { + TimestampsToReturn::Neither | TimestampsToReturn::Invalid => { + value.source_timestamp = None; + value.source_picoseconds = None; + 
value.server_timestamp = None; + value.server_picoseconds = None + } + TimestampsToReturn::Server => { + value.source_timestamp = None; + value.source_picoseconds = None; + } + TimestampsToReturn::Source => { + value.server_timestamp = None; + value.server_picoseconds = None + } + TimestampsToReturn::Both => { + // DO NOTHING } - data_change - } else { - false } - } - fn is_event_filter(&self) -> bool { - matches!(self.filter, FilterType::EventFilter(_)) + let client_handle = self.client_handle; + self.enqueue_notification(MonitoredItemNotification { + client_handle, + value, + }); + + true } - /// Fetches the most recent value of the monitored item from the source and compares - /// it to the last value. If the value has changed according to a filter / equality - /// check, the latest value and its timestamps will be stored in the monitored item. - /// - /// The function will return true if the value was changed, false otherwise. - pub fn check_value( - &mut self, - address_space: &AddressSpace, - now: &DateTimeUtc, - resend_data: bool, - ) -> bool { + pub(super) fn notify_event(&mut self, event: &dyn Event) -> bool { if self.monitoring_mode == MonitoringMode::Disabled { - panic!("Should not check value while monitoring mode is disabled"); + return false; } - let changed = if let Some(node) = address_space.find_node(&self.item_to_monitor.node_id) { - match AttributeId::from_u32(self.item_to_monitor.attribute_id) { - Ok(attribute_id) => { - let node = node.as_node(); - match self.filter { - FilterType::EventFilter(_) => { - // EventFilter is only relevant on the EventNotifier attribute - if attribute_id == AttributeId::EventNotifier { - let happened_since = self.last_sample_time; - self.check_for_events(address_space, &happened_since, node) - } else { - false - } - } - _ => self.check_for_data_change( - address_space, - resend_data, - attribute_id, - node, - ), - } - } - Err(_) => { - trace!( - "Item has no attribute_id {} so it hasn't changed, node {:?}", - 
self.item_to_monitor.attribute_id, - self.item_to_monitor.node_id - ); - false - } - } - } else { - trace!( - "Cannot find item to monitor, node {:?}", - self.item_to_monitor.node_id - ); - false + + let FilterType::EventFilter(filter) = &self.filter else { + return false; + }; + + let Some(notif) = filter.evaluate(event, self.client_handle) else { + return false; }; - self.last_sample_time = *now; - changed + + self.enqueue_notification(notif); + + true } - /// Enqueues a notification message for the monitored item - pub fn enqueue_notification_message(&mut self, notification: T) - where - T: Into, - { - // test for overflow - let overflow = if self.notification_queue.len() == self.queue_size { - trace!( - "Data change overflow, node {:?}", - self.item_to_monitor.node_id - ); - // Overflow behaviour + fn enqueue_notification(&mut self, notification: impl Into) { + self.any_new_notification = true; + let overflow = self.notification_queue.len() == self.queue_size; + if overflow { if self.discard_oldest { - // Throw away oldest item (the one at the start) to make space at the end - let _ = self.notification_queue.pop_front(); + self.notification_queue.pop_front(); } else { - // Remove the latest notification self.notification_queue.pop_back(); } - // Overflow only affects queues > 1 element - self.queue_size > 1 - } else { - false - }; + } + let mut notification = notification.into(); if overflow { - if let Notification::MonitoredItemNotification(ref mut notification) = notification { - // Set the overflow bit on the data value's status - notification.value.status = Some(notification.value.status().set_overflow(true)); + if let Notification::MonitoredItemNotification(n) = &mut notification { + n.value.status = Some(n.value.status().set_overflow(true)); } self.queue_overflow = true; } + self.notification_queue.push_back(notification); } - /// Gets the oldest notification message from the notification queue - #[cfg(test)] - pub fn oldest_notification_message(&mut 
self) -> Option { - if self.notification_queue.is_empty() { - None - } else { - self.queue_overflow = false; - self.notification_queue.pop_front() + pub(super) fn add_current_value_to_queue(&mut self) { + // Check if the last value is already enqueued + let last_value = self.notification_queue.get(0); + if let Some(Notification::MonitoredItemNotification(it)) = last_value { + if Some(&it.value) == self.last_data_value.as_ref() { + return; + } } + + let Some(value) = self.last_data_value.as_ref() else { + return; + }; + + self.enqueue_notification(Notification::MonitoredItemNotification( + MonitoredItemNotification { + client_handle: self.client_handle, + value: value.clone(), + }, + )); } - /// Retrieves all the notification messages from the queue, oldest to newest - pub fn all_notifications(&mut self) -> Option> { - if self.notification_queue.is_empty() { - None - } else { - // Removes all the queued notifications to the output - self.queue_overflow = false; - Some(self.notification_queue.drain(..).collect()) - } + /// Return `true` if this item has a stored last value. + pub fn has_last_value(&self) -> bool { + self.last_data_value.is_some() } - /// Takes the requested sampling interval value supplied by client and ensures it is within - /// the range supported by the server - fn sanitize_sampling_interval( - server_state: &ServerState, - requested_sampling_interval: f64, - ) -> f64 { - if requested_sampling_interval < 0.0 { - // From spec "any negative number is interpreted as -1" - // -1 means monitored item's sampling interval defaults to the subscription's publishing interval - -1.0 - } else if requested_sampling_interval == 0.0 - || requested_sampling_interval < server_state.min_sampling_interval_ms - { - server_state.min_sampling_interval_ms - } else { - requested_sampling_interval - } + /// Return `true` if this item has any new notifications. + /// Note that this clears the `any_new_notification` flag and should + /// be used with care. 
+ pub(super) fn has_new_notifications(&mut self) -> bool { + let any_new = self.any_new_notification; + self.any_new_notification = false; + any_new } - /// Takes the requested queue size and ensures it is within the range supported by the server - fn sanitize_queue_size(server_state: &ServerState, requested_queue_size: usize) -> usize { - if requested_queue_size == 0 || requested_queue_size == 1 { - // For data monitored items 0 -> 1 - // Future - for event monitored items, queue size should be the default queue size for event notifications - 1 - // Future - for event monitored items, the minimum queue size the server requires for event notifications - } else if requested_queue_size > server_state.max_monitored_item_queue_size { - server_state.max_monitored_item_queue_size - // Future - for event monitored items MaxUInt32 returns the maximum queue size the server support - // for event notifications - } else { - requested_queue_size - } + pub(super) fn pop_notification(&mut self) -> Option { + self.notification_queue.pop_front() + } + + /// Adds or removes other monitored items which will be triggered when this monitored item changes + pub(super) fn set_triggering(&mut self, items_to_add: &[u32], items_to_remove: &[u32]) { + // Spec says to process remove items before adding new ones. + items_to_remove.iter().for_each(|i| { + self.triggered_items.remove(i); + }); + items_to_add.iter().for_each(|i| { + self.triggered_items.insert(*i); + }); } - pub fn monitored_item_id(&self) -> u32 { - self.monitored_item_id + pub(super) fn remove_dead_trigger(&mut self, id: u32) { + self.triggered_items.remove(&id); } - pub fn client_handle(&self) -> u32 { - self.client_handle + /// Whether this monitored item is currently reporting new values. 
+ pub fn is_reporting(&self) -> bool { + matches!(self.monitoring_mode, MonitoringMode::Reporting) } - pub fn sampling_interval(&self) -> Duration { - self.sampling_interval + /// Whether this monitored item is currently storing new values. + pub fn is_sampling(&self) -> bool { + matches!( + self.monitoring_mode, + MonitoringMode::Reporting | MonitoringMode::Sampling + ) } + /// Items that are triggered by updates to this monitored item. pub fn triggered_items(&self) -> &BTreeSet { &self.triggered_items } - pub fn set_monitoring_mode(&mut self, monitoring_mode: MonitoringMode) { + /// Whether this monitored item has enqueued notifications. + pub fn has_notifications(&self) -> bool { + !self.notification_queue.is_empty() + } + + /// Monitored item ID. + pub fn id(&self) -> u32 { + self.id + } + + /// Sampling interval. + pub fn sampling_interval(&self) -> f64 { + self.sampling_interval + } + + /// Current maximum queue size. + pub fn queue_size(&self) -> usize { + self.queue_size + } + + /// Item being monitored. + pub fn item_to_monitor(&self) -> &ParsedReadValueId { + &self.item_to_monitor + } + + pub(super) fn set_monitoring_mode(&mut self, monitoring_mode: MonitoringMode) { self.monitoring_mode = monitoring_mode; } + /// Current monitoring mode. pub fn monitoring_mode(&self) -> MonitoringMode { self.monitoring_mode } - pub fn queue_size(&self) -> usize { - self.queue_size + /// Whether oldest or newest values are discarded when the queue + /// overflows. 
+ pub fn discard_oldest(&self) -> bool { + self.discard_oldest + } +} + +#[cfg(test)] +pub(super) mod tests { + use chrono::{Duration, Utc}; + + use crate::{ + server::{node_manager::ParsedReadValueId, subscriptions::monitored_item::Notification}, + types::{ + AttributeId, DataChangeFilter, DataChangeTrigger, DataValue, DateTime, DeadbandType, + MonitoringMode, NodeId, ReadValueId, StatusCode, Variant, + }, + }; + + use super::{FilterType, MonitoredItem}; + + pub fn new_monitored_item( + id: u32, + item_to_monitor: ReadValueId, + monitoring_mode: MonitoringMode, + filter: FilterType, + sampling_interval: f64, + discard_oldest: bool, + initial_value: Option, + ) -> MonitoredItem { + let mut v = MonitoredItem { + id, + item_to_monitor: ParsedReadValueId::parse(item_to_monitor).unwrap(), + monitoring_mode, + triggered_items: Default::default(), + client_handle: Default::default(), + sampling_interval, + filter, + discard_oldest, + queue_size: 10, + notification_queue: Default::default(), + queue_overflow: false, + timestamps_to_return: crate::types::TimestampsToReturn::Both, + last_data_value: None, + any_new_notification: false, + }; + + if let Some(val) = initial_value { + v.notify_data_value(val); + } else { + let now = DateTime::now(); + v.notify_data_value(DataValue { + value: Some(Variant::Empty), + status: Some(StatusCode::BadWaitingForInitialData), + source_timestamp: Some(now), + source_picoseconds: None, + server_timestamp: Some(now), + server_picoseconds: None, + }); + } + + v + } + + #[test] + fn data_change_filter() { + let mut filter = DataChangeFilter { + trigger: DataChangeTrigger::Status, + deadband_type: DeadbandType::None as u32, + deadband_value: 0f64, + }; + + let mut v1 = DataValue { + value: None, + status: None, + source_timestamp: None, + source_picoseconds: None, + server_timestamp: None, + server_picoseconds: None, + }; + + let mut v2 = DataValue { + value: None, + status: None, + source_timestamp: None, + source_picoseconds: None, + 
server_timestamp: None, + server_picoseconds: None, + }; + + assert_eq!(filter.compare(&v1, &v2, None), true); + + // Change v1 status + v1.status = Some(StatusCode::Good); + assert_eq!(filter.compare(&v1, &v2, None), false); + + // Change v2 status + v2.status = Some(StatusCode::Good); + assert_eq!(filter.compare(&v1, &v2, None), true); + + // Change value - but since trigger is status, this should not matter + v1.value = Some(Variant::Boolean(true)); + assert_eq!(filter.compare(&v1, &v2, None), true); + + // Change trigger to status-value and change should matter + filter.trigger = DataChangeTrigger::StatusValue; + assert_eq!(filter.compare(&v1, &v2, None), false); + + // Now values are the same + v2.value = Some(Variant::Boolean(true)); + assert_eq!(filter.compare(&v1, &v2, None), true); + + // And for status-value-timestamp + filter.trigger = DataChangeTrigger::StatusValueTimestamp; + assert_eq!(filter.compare(&v1, &v2, None), true); + + // Change timestamps to differ + let now = DateTime::now(); + v1.server_timestamp = Some(now.clone()); + assert_eq!(filter.compare(&v1, &v2, None), false); } - #[cfg(test)] - pub fn queue_overflow(&self) -> bool { - self.queue_overflow + #[test] + fn data_change_deadband_abs() { + let filter = DataChangeFilter { + trigger: DataChangeTrigger::StatusValue, + // Abs compare + deadband_type: DeadbandType::Absolute as u32, + deadband_value: 1f64, + }; + + let v1 = DataValue { + value: Some(Variant::Double(10f64)), + status: None, + source_timestamp: None, + source_picoseconds: None, + server_timestamp: None, + server_picoseconds: None, + }; + + let mut v2 = DataValue { + value: Some(Variant::Double(10f64)), + status: None, + source_timestamp: None, + source_picoseconds: None, + server_timestamp: None, + server_picoseconds: None, + }; + + // Values are the same so deadband should not matter + assert_eq!(filter.compare(&v1, &v2, None), true); + + // Adjust by less than deadband + v2.value = Some(Variant::Double(10.9f64)); + 
assert_eq!(filter.compare(&v1, &v2, None), true); + + // Adjust by equal deadband + v2.value = Some(Variant::Double(11f64)); + assert_eq!(filter.compare(&v1, &v2, None), true); + + // Adjust by equal deadband plus a little bit + v2.value = Some(Variant::Double(11.00001f64)); + assert_eq!(filter.compare(&v1, &v2, None), false); + } + + // Straight tests of abs function + #[test] + fn deadband_abs() { + assert_eq!(DataChangeFilter::abs_compare(100f64, 100f64, 0f64), true); + assert_eq!(DataChangeFilter::abs_compare(100f64, 100f64, 1f64), true); + assert_eq!(DataChangeFilter::abs_compare(100f64, 101f64, 1f64), true); + assert_eq!(DataChangeFilter::abs_compare(101f64, 100f64, 1f64), true); + assert_eq!( + DataChangeFilter::abs_compare(101.001f64, 100f64, 1f64), + false + ); + assert_eq!( + DataChangeFilter::abs_compare(100f64, 101.001f64, 1f64), + false + ); + } + + // Straight tests of pct function + #[test] + fn deadband_pct() { + assert_eq!( + DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 0f64), + false + ); + assert_eq!( + DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 1f64), + true + ); + assert_eq!( + DataChangeFilter::pct_compare(100f64, 101.0001f64, 0f64, 100f64, 1f64), + false + ); + assert_eq!( + DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1f64), + false + ); + assert_eq!( + DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1.0002f64), + true + ); } - #[cfg(test)] - pub fn notification_queue(&self) -> &VecDeque { - &self.notification_queue + #[test] + fn monitored_item_filter() { + let start = Utc::now(); + let mut item = new_monitored_item( + 1, + ReadValueId { + node_id: NodeId::null(), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + MonitoringMode::Reporting, + FilterType::DataChangeFilter(DataChangeFilter { + trigger: DataChangeTrigger::StatusValue, + // Abs compare + deadband_type: DeadbandType::Absolute as u32, + deadband_value: 0.9f64, + }), + 100.0, + true, + 
Some(DataValue::new_at(1.0, start.into())), + ); + + // Not within sampling interval + assert!(!item.notify_data_value(DataValue::new_at( + 2.0, + (start + Duration::try_milliseconds(50).unwrap()).into() + ))); + // In deadband + assert!(!item.notify_data_value(DataValue::new_at( + 1.5, + (start + Duration::try_milliseconds(100).unwrap()).into() + ))); + // Sampling is disabled, don't notify anything. + item.set_monitoring_mode(MonitoringMode::Disabled); + assert!(!item.notify_data_value(DataValue::new_at( + 3.0, + (start + Duration::try_milliseconds(250).unwrap()).into() + ))); + item.set_monitoring_mode(MonitoringMode::Reporting); + // Ok + assert!(item.notify_data_value(DataValue::new_at( + 2.0, + (start + Duration::try_milliseconds(100).unwrap()).into() + ))); + // Now in deadband + assert!(!item.notify_data_value(DataValue::new_at( + 2.5, + (start + Duration::try_milliseconds(200).unwrap()).into() + ))); + // And outside deadband + assert!(item.notify_data_value(DataValue::new_at( + 3.0, + (start + Duration::try_milliseconds(250).unwrap()).into() + ))); + assert_eq!(item.notification_queue.len(), 3); } - #[cfg(test)] - pub(crate) fn set_discard_oldest(&mut self, discard_oldest: bool) { - self.discard_oldest = discard_oldest; + #[test] + fn monitored_item_overflow() { + let start = Utc::now(); + let mut item = new_monitored_item( + 1, + ReadValueId { + node_id: NodeId::null(), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + MonitoringMode::Reporting, + FilterType::None, + 100.0, + true, + Some(DataValue::new_at(0, start.into())), + ); + item.queue_size = 5; + for i in 0..4 { + assert!(item.notify_data_value(DataValue::new_at( + i as i32 + 1, + (start + Duration::try_milliseconds(100 * i + 100).unwrap()).into(), + ))); + } + assert_eq!(item.notification_queue.len(), 5); + + assert!(item.notify_data_value(DataValue::new_at( + 5, + (start + Duration::try_milliseconds(600).unwrap()).into(), + ))); + + 
assert_eq!(item.notification_queue.len(), 5); + let items: Vec<_> = item.notification_queue.drain(..).collect(); + for (idx, notif) in items.iter().enumerate() { + let Notification::MonitoredItemNotification(n) = notif else { + panic!("Wrong notification type"); + }; + let Some(Variant::Int32(v)) = &n.value.value else { + panic!("Wrong value type"); + }; + // Values should be 1, 2, 3, 4, 5, since the first value 0 was dropped. + assert_eq!(*v, idx as i32 + 1); + // Last status code should have the overflow flag set. + if idx == 4 { + assert_eq!(n.value.status, Some(StatusCode::Good.set_overflow(true))); + } else { + assert_eq!(n.value.status, Some(StatusCode::Good)); + } + } } } diff --git a/lib/src/server/subscriptions/session_subscriptions.rs b/lib/src/server/subscriptions/session_subscriptions.rs new file mode 100644 index 000000000..ecd2410dd --- /dev/null +++ b/lib/src/server/subscriptions/session_subscriptions.rs @@ -0,0 +1,797 @@ +use std::{ + collections::VecDeque, + sync::Arc, + time::{Duration, Instant}, +}; + +use super::{ + monitored_item::MonitoredItem, + subscription::{MonitoredItemHandle, Subscription, TickReason, TickResult}, + CreateMonitoredItem, NonAckedPublish, PendingPublish, PersistentSessionKey, +}; +use hashbrown::{HashMap, HashSet}; + +use crate::{ + server::{ + info::ServerInfo, + node_manager::{MonitoredItemRef, MonitoredItemUpdateRef, TypeTree}, + session::instance::Session, + Event, SubscriptionLimits, + }, + sync::RwLock, + types::{ + AttributeId, CreateSubscriptionRequest, CreateSubscriptionResponse, DataValue, DateTime, + DateTimeUtc, ExtensionObject, ModifySubscriptionRequest, ModifySubscriptionResponse, + MonitoredItemCreateResult, MonitoredItemModifyRequest, MonitoredItemModifyResult, + MonitoringMode, NodeId, NotificationMessage, ObjectId, PublishRequest, PublishResponse, + RepublishRequest, RepublishResponse, ResponseHeader, ServiceFault, + SetPublishingModeRequest, SetPublishingModeResponse, StatusCode, TimestampsToReturn, + }, 
+}; + +/// Subscriptions belonging to a single session. Note that they are technically _owned_ by +/// a user token, which means that they can be transfered to a different session. +pub struct SessionSubscriptions { + /// Identity token of the user that created the subscription, used for transfer subscriptions. + user_token: PersistentSessionKey, + /// Subscriptions associated with the session. + subscriptions: HashMap, + /// Publish request queue (requests by the client on the session) + publish_request_queue: VecDeque, + /// Notifications that have been sent but have yet to be acknowledged (retransmission queue). + retransmission_queue: VecDeque, + /// Configured limits on subscriptions. + limits: SubscriptionLimits, + + /// Static reference to the session owning this, required to cleanly handle deletion. + session: Arc>, +} + +impl SessionSubscriptions { + pub(super) fn new( + limits: SubscriptionLimits, + user_token: PersistentSessionKey, + session: Arc>, + ) -> Self { + Self { + user_token, + subscriptions: HashMap::new(), + publish_request_queue: VecDeque::new(), + retransmission_queue: VecDeque::new(), + limits, + session, + } + } + + fn max_publish_requests(&self) -> usize { + self.limits + .max_pending_publish_requests + .min(self.subscriptions.len() * self.limits.max_publish_requests_per_subscription) + .max(1) + } + + pub(super) fn is_ready_to_delete(&self) -> bool { + self.subscriptions.is_empty() && self.publish_request_queue.is_empty() + } + + pub(super) fn insert( + &mut self, + subscription: Subscription, + notifs: Vec, + ) -> Result<(), (StatusCode, Subscription, Vec)> { + if self.subscriptions.len() >= self.limits.max_subscriptions_per_session { + return Err((StatusCode::BadTooManySubscriptions, subscription, notifs)); + } + self.subscriptions.insert(subscription.id(), subscription); + for notif in notifs { + self.retransmission_queue.push_back(notif); + } + Ok(()) + } + + /// Return `true` if the session has a subscription with ID given by + /// 
`sub_id`. + pub fn contains(&self, sub_id: u32) -> bool { + self.subscriptions.contains_key(&sub_id) + } + + /// Return a vector of all the subscription IDs in this session. + pub fn subscription_ids(&self) -> Vec { + self.subscriptions.keys().copied().collect() + } + + pub(super) fn remove( + &mut self, + subscription_id: u32, + ) -> (Option, Vec) { + let mut notifs = Vec::new(); + let mut idx = 0; + while idx < self.retransmission_queue.len() { + if self.retransmission_queue[idx].subscription_id == subscription_id { + notifs.push(self.retransmission_queue.remove(idx).unwrap()); + } else { + idx += 1; + } + } + + (self.subscriptions.remove(&subscription_id), notifs) + } + + /// Get a mutable reference to a subscription by ID. + pub fn get_mut(&mut self, subscription_id: u32) -> Option<&mut Subscription> { + self.subscriptions.get_mut(&subscription_id) + } + + /// Get a reference to a subscription by ID. + pub fn get(&self, subscription_id: u32) -> Option<&Subscription> { + self.subscriptions.get(&subscription_id) + } + + pub(super) fn create_subscription( + &mut self, + request: &CreateSubscriptionRequest, + info: &ServerInfo, + ) -> Result { + if self.subscriptions.len() >= self.limits.max_subscriptions_per_session { + return Err(StatusCode::BadTooManySubscriptions); + } + let subscription_id = info.subscription_id_handle.next(); + + let (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) = + Self::revise_subscription_values( + info, + request.requested_publishing_interval, + request.requested_max_keep_alive_count, + request.requested_lifetime_count, + ); + + let subscription = Subscription::new( + subscription_id, + request.publishing_enabled, + Duration::from_millis(revised_publishing_interval as u64), + revised_lifetime_count, + revised_max_keep_alive_count, + request.priority, + self.limits.max_queued_notifications, + self.revise_max_notifications_per_publish(request.max_notifications_per_publish), + ); + 
self.subscriptions.insert(subscription.id(), subscription); + Ok(CreateSubscriptionResponse { + response_header: ResponseHeader::new_good(&request.request_header), + subscription_id, + revised_publishing_interval, + revised_lifetime_count, + revised_max_keep_alive_count, + }) + } + + pub(super) fn modify_subscription( + &mut self, + request: &ModifySubscriptionRequest, + info: &ServerInfo, + ) -> Result { + let max_notifications_per_publish = + self.revise_max_notifications_per_publish(request.max_notifications_per_publish); + let Some(subscription) = self.subscriptions.get_mut(&request.subscription_id) else { + return Err(StatusCode::BadSubscriptionIdInvalid); + }; + + let (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) = + Self::revise_subscription_values( + info, + request.requested_publishing_interval, + request.requested_max_keep_alive_count, + request.requested_lifetime_count, + ); + + subscription.set_publishing_interval(Duration::from_micros( + (revised_publishing_interval * 1000.0) as u64, + )); + subscription.set_max_keep_alive_counter(revised_max_keep_alive_count); + subscription.set_max_lifetime_counter(revised_lifetime_count); + subscription.set_priority(request.priority); + subscription.reset_lifetime_counter(); + subscription.reset_keep_alive_counter(); + subscription.set_max_notifications_per_publish(max_notifications_per_publish); + + Ok(ModifySubscriptionResponse { + response_header: ResponseHeader::new_good(&request.request_header), + revised_publishing_interval, + revised_lifetime_count, + revised_max_keep_alive_count, + }) + } + + pub(super) fn set_publishing_mode( + &mut self, + request: &SetPublishingModeRequest, + ) -> Result { + let Some(ids) = &request.subscription_ids else { + return Err(StatusCode::BadNothingToDo); + }; + if ids.is_empty() { + return Err(StatusCode::BadNothingToDo); + } + + let mut results = Vec::new(); + for id in ids { + results.push(match self.subscriptions.get_mut(id) { + Some(sub) 
=> { + sub.set_publishing_enabled(request.publishing_enabled); + sub.reset_lifetime_counter(); + StatusCode::Good + } + None => StatusCode::BadSubscriptionIdInvalid, + }) + } + Ok(SetPublishingModeResponse { + response_header: ResponseHeader::new_good(&request.request_header), + results: Some(results), + diagnostic_infos: None, + }) + } + + pub(super) fn republish( + &self, + request: &RepublishRequest, + ) -> Result { + let msg = self.find_notification_message( + request.subscription_id, + request.retransmit_sequence_number, + )?; + Ok(RepublishResponse { + response_header: ResponseHeader::new_good(&request.request_header), + notification_message: msg, + }) + } + + pub(super) fn create_monitored_items( + &mut self, + subscription_id: u32, + requests: &[CreateMonitoredItem], + ) -> Result, StatusCode> { + let Some(sub) = self.subscriptions.get_mut(&subscription_id) else { + return Err(StatusCode::BadSubscriptionIdInvalid); + }; + + let mut results = Vec::with_capacity(requests.len()); + for item in requests { + let filter_result = item + .filter_res() + .map(|r| { + ExtensionObject::from_encodable( + ObjectId::EventFilterResult_Encoding_DefaultBinary, + r, + ) + }) + .unwrap_or_else(|| ExtensionObject::null()); + if item.status_code().is_good() { + let new_item = MonitoredItem::new(&item); + results.push(MonitoredItemCreateResult { + status_code: StatusCode::Good, + monitored_item_id: new_item.id(), + revised_sampling_interval: new_item.sampling_interval(), + revised_queue_size: new_item.queue_size() as u32, + filter_result, + }); + sub.insert(new_item.id(), new_item); + } else { + results.push(MonitoredItemCreateResult { + status_code: item.status_code(), + monitored_item_id: 0, + revised_sampling_interval: item.sampling_interval(), + revised_queue_size: item.queue_size() as u32, + filter_result, + }); + } + } + + Ok(results) + } + + pub(super) fn modify_monitored_items( + &mut self, + subscription_id: u32, + info: &ServerInfo, + timestamps_to_return: 
TimestampsToReturn, + requests: Vec, + type_tree: &TypeTree, + ) -> Result, StatusCode> { + let Some(sub) = self.subscriptions.get_mut(&subscription_id) else { + return Err(StatusCode::BadSubscriptionIdInvalid); + }; + let mut results = Vec::with_capacity(requests.len()); + for request in requests { + if let Some(item) = sub.get_mut(&request.monitored_item_id) { + let (filter_result, status) = + item.modify(info, timestamps_to_return, &request, type_tree); + let filter_result = filter_result + .map(|f| { + ExtensionObject::from_encodable( + ObjectId::EventFilterResult_Encoding_DefaultBinary, + &f, + ) + }) + .unwrap_or_else(|| ExtensionObject::null()); + + results.push(MonitoredItemUpdateRef::new( + MonitoredItemHandle { + subscription_id, + monitored_item_id: item.id(), + }, + item.item_to_monitor().node_id.clone(), + item.item_to_monitor().attribute_id, + MonitoredItemModifyResult { + status_code: status, + revised_sampling_interval: item.sampling_interval(), + revised_queue_size: item.queue_size() as u32, + filter_result, + }, + )); + } else { + results.push(MonitoredItemUpdateRef::new( + MonitoredItemHandle { + subscription_id, + monitored_item_id: request.monitored_item_id, + }, + NodeId::null(), + AttributeId::NodeId, + MonitoredItemModifyResult { + status_code: StatusCode::BadMonitoredItemIdInvalid, + revised_sampling_interval: 0.0, + revised_queue_size: 0, + filter_result: ExtensionObject::null(), + }, + )); + } + } + + Ok(results) + } + + pub(super) fn set_monitoring_mode( + &mut self, + subscription_id: u32, + monitoring_mode: MonitoringMode, + items: Vec, + ) -> Result, StatusCode> { + let Some(sub) = self.subscriptions.get_mut(&subscription_id) else { + return Err(StatusCode::BadSubscriptionIdInvalid); + }; + let mut results = Vec::with_capacity(items.len()); + for id in items { + let handle = MonitoredItemHandle { + subscription_id, + monitored_item_id: id, + }; + if let Some(item) = sub.get_mut(&id) { + results.push(( + StatusCode::Good, + 
MonitoredItemRef::new( + handle, + item.item_to_monitor().node_id.clone(), + item.item_to_monitor().attribute_id, + ), + )); + item.set_monitoring_mode(monitoring_mode); + } else { + results.push(( + StatusCode::BadMonitoredItemIdInvalid, + MonitoredItemRef::new(handle, NodeId::null(), AttributeId::NodeId), + )); + } + } + Ok(results) + } + + fn filter_links(links: Vec, sub: &Subscription) -> (Vec, Vec) { + let mut to_apply = Vec::with_capacity(links.len()); + let mut results = Vec::with_capacity(links.len()); + + for link in links { + if sub.contains_key(&link) { + to_apply.push(link); + results.push(StatusCode::Good); + } else { + results.push(StatusCode::BadMonitoredItemIdInvalid); + } + } + (to_apply, results) + } + + pub(super) fn set_triggering( + &mut self, + subscription_id: u32, + triggering_item_id: u32, + links_to_add: Vec, + links_to_remove: Vec, + ) -> Result<(Vec, Vec), StatusCode> { + let Some(sub) = self.subscriptions.get_mut(&subscription_id) else { + return Err(StatusCode::BadSubscriptionIdInvalid); + }; + if !sub.contains_key(&triggering_item_id) { + return Err(StatusCode::BadMonitoredItemIdInvalid); + } + + let (to_add, add_results) = Self::filter_links(links_to_add, &sub); + let (to_remove, remove_results) = Self::filter_links(links_to_remove, &sub); + + let item = sub.get_mut(&triggering_item_id).unwrap(); + + item.set_triggering(&to_add, &to_remove); + + Ok((add_results, remove_results)) + } + + pub(super) fn delete_monitored_items( + &mut self, + subscription_id: u32, + items: &[u32], + ) -> Result, StatusCode> { + let Some(sub) = self.subscriptions.get_mut(&subscription_id) else { + return Err(StatusCode::BadSubscriptionIdInvalid); + }; + let mut results = Vec::with_capacity(items.len()); + for id in items { + let handle = MonitoredItemHandle { + subscription_id, + monitored_item_id: *id, + }; + if let Some(item) = sub.remove(&id) { + results.push(( + StatusCode::Good, + MonitoredItemRef::new( + handle, + 
item.item_to_monitor().node_id.clone(), + item.item_to_monitor().attribute_id, + ), + )); + } else { + results.push(( + StatusCode::BadMonitoredItemIdInvalid, + MonitoredItemRef::new(handle, NodeId::null(), AttributeId::NodeId), + )) + } + } + Ok(results) + } + + pub(super) fn delete_subscriptions( + &mut self, + ids: &[u32], + ) -> Vec<(StatusCode, Vec)> { + let id_set: HashSet<_> = ids.iter().copied().collect(); + let mut result = Vec::with_capacity(ids.len()); + for id in ids { + let Some(mut sub) = self.subscriptions.remove(id) else { + result.push((StatusCode::BadSubscriptionIdInvalid, Vec::new())); + continue; + }; + + let items = sub + .drain() + .map(|item| { + MonitoredItemRef::new( + MonitoredItemHandle { + subscription_id: *id, + monitored_item_id: item.1.id(), + }, + item.1.item_to_monitor().node_id.clone(), + item.1.item_to_monitor().attribute_id, + ) + }) + .collect(); + + result.push((StatusCode::Good, items)) + } + + self.retransmission_queue + .retain(|r| !id_set.contains(&r.subscription_id)); + + result + } + + /// This function takes the requested values passed in a create / modify and returns revised + /// values that conform to the server's limits. 
For simplicity the return type is a tuple + fn revise_subscription_values( + info: &ServerInfo, + requested_publishing_interval: f64, + requested_max_keep_alive_count: u32, + requested_lifetime_count: u32, + ) -> (f64, u32, u32) { + let revised_publishing_interval = f64::max( + requested_publishing_interval, + info.config.limits.subscriptions.min_publishing_interval_ms, + ); + let revised_max_keep_alive_count = if requested_max_keep_alive_count + > info.config.limits.subscriptions.max_keep_alive_count + { + info.config.limits.subscriptions.max_keep_alive_count + } else if requested_max_keep_alive_count == 0 { + info.config.limits.subscriptions.default_keep_alive_count + } else { + requested_max_keep_alive_count + }; + // Lifetime count must exceed keep alive count by at least a multiple of + let min_lifetime_count = revised_max_keep_alive_count * 3; + let revised_lifetime_count = if requested_lifetime_count < min_lifetime_count { + min_lifetime_count + } else if requested_lifetime_count > info.config.limits.subscriptions.max_lifetime_count { + info.config.limits.subscriptions.max_lifetime_count + } else { + requested_lifetime_count + }; + ( + revised_publishing_interval, + revised_max_keep_alive_count, + revised_lifetime_count, + ) + } + + fn revise_max_notifications_per_publish(&self, inp: u32) -> u64 { + if self.limits.max_notifications_per_publish == 0 { + inp as u64 + } else if inp as u64 > self.limits.max_notifications_per_publish { + self.limits.max_notifications_per_publish + } else if inp == 0 { + self.limits.max_notifications_per_publish + } else { + inp as u64 + } + } + + pub(crate) fn enqueue_publish_request( + &mut self, + now: &DateTimeUtc, + now_instant: Instant, + mut request: PendingPublish, + ) { + if self.publish_request_queue.len() >= self.max_publish_requests() { + // Tick to trigger publish, maybe remove a request to make space for new one + let _ = self.tick(now, now_instant, TickReason::ReceivePublishRequest); + } + + if 
self.publish_request_queue.len() >= self.max_publish_requests() { + // Pop the oldest publish request from the queue and return it with an error + let req = self.publish_request_queue.pop_front().unwrap(); + // Ignore the result of this, if it fails it just means that the + // channel is disconnected. + let _ = req.response.send( + ServiceFault::new( + &req.request.request_header, + StatusCode::BadTooManyPublishRequests, + ) + .into(), + ); + } + + request.ack_results = self.process_subscription_acks(&request.request); + self.publish_request_queue.push_back(request); + self.tick(now, now_instant, TickReason::ReceivePublishRequest); + } + + pub(crate) fn tick( + &mut self, + now: &DateTimeUtc, + now_instant: Instant, + tick_reason: TickReason, + ) -> Vec { + let mut to_delete = Vec::new(); + if self.subscriptions.is_empty() { + for pb in self.publish_request_queue.drain(..) { + let _ = pb.response.send( + ServiceFault::new(&pb.request.request_header, StatusCode::BadNoSubscription) + .into(), + ); + } + return to_delete; + } + + self.remove_expired_publish_requests(now_instant); + + let subscription_ids = { + // Sort subscriptions by priority + let mut subscription_priority: Vec<(u32, u8)> = self + .subscriptions + .values() + .map(|v| (v.id(), v.priority())) + .collect(); + subscription_priority.sort_by(|s1, s2| s1.1.cmp(&s2.1)); + subscription_priority.into_iter().map(|s| s.0) + }; + + let mut responses = Vec::new(); + let mut more_notifications = false; + + for sub_id in subscription_ids { + let subscription = self.subscriptions.get_mut(&sub_id).unwrap(); + let res = subscription.tick( + now, + now_instant, + tick_reason, + !self.publish_request_queue.is_empty(), + ); + // Get notifications and publish request pairs while there are any of either left. 
+ while !self.publish_request_queue.is_empty() { + if let Some(notification_message) = subscription.take_notification() { + let publish_request = self.publish_request_queue.pop_front().unwrap(); + responses.push((publish_request, notification_message, sub_id)); + } else { + break; + } + } + // Make sure to note if there are more notifications in any subscription. + more_notifications |= subscription.more_notifications(); + + // If the subscription expired, make sure to collect any deleted monitored items. + + if matches!(res, TickResult::Expired) { + to_delete.extend(subscription.drain().map(|item| { + MonitoredItemRef::new( + MonitoredItemHandle { + subscription_id: sub_id, + monitored_item_id: item.1.id(), + }, + item.1.item_to_monitor().node_id.clone(), + item.1.item_to_monitor().attribute_id, + ) + })) + } + + if subscription.ready_to_remove() { + self.subscriptions.remove(&sub_id); + self.retransmission_queue + .retain(|f| f.subscription_id != sub_id); + } + } + + let num_responses = responses.len(); + for (idx, (publish_request, notification, subscription_id)) in + responses.into_iter().enumerate() + { + let is_last = idx == num_responses - 1; + + let available_sequence_numbers = self.available_sequence_numbers(subscription_id); + + if self.retransmission_queue.len() >= self.max_publish_requests() * 2 { + self.retransmission_queue.pop_front(); + } + self.retransmission_queue.push_back(NonAckedPublish { + message: notification.clone(), + subscription_id, + }); + + let _ = publish_request.response.send( + PublishResponse { + response_header: ResponseHeader::new_timestamped_service_result( + DateTime::from(*now), + &publish_request.request.request_header, + StatusCode::Good, + ), + subscription_id, + available_sequence_numbers, + // Only set more_notifications on the last publish response. 
+ more_notifications: is_last && more_notifications, + notification_message: notification, + results: publish_request.ack_results, + diagnostic_infos: None, + } + .into(), + ); + } + + to_delete + } + + fn find_notification_message( + &self, + subscription_id: u32, + sequence_number: u32, + ) -> Result { + if !self.subscriptions.contains_key(&subscription_id) { + return Err(StatusCode::BadSubscriptionIdInvalid); + } + let Some(notification) = self.retransmission_queue.iter().find(|m| { + m.subscription_id == subscription_id && m.message.sequence_number == sequence_number + }) else { + return Err(StatusCode::BadMessageNotAvailable); + }; + Ok(notification.message.clone()) + } + + fn remove_expired_publish_requests(&mut self, now: Instant) { + let mut idx = 0; + while idx < self.publish_request_queue.len() { + if self.publish_request_queue[idx].deadline < now { + let req = self.publish_request_queue.remove(idx).unwrap(); + let _ = req.response.send( + ServiceFault::new(&req.request.request_header, StatusCode::BadTimeout).into(), + ); + } else { + idx += 1; + } + } + } + + fn process_subscription_acks(&mut self, request: &PublishRequest) -> Option> { + let acks = request.subscription_acknowledgements.as_ref()?; + if acks.is_empty() { + return None; + } + + Some( + acks.iter() + .map(|ack| { + if !self.subscriptions.contains_key(&ack.subscription_id) { + StatusCode::BadSubscriptionIdInvalid + } else if let Some((idx, _)) = + self.retransmission_queue.iter().enumerate().find(|(_, p)| { + p.subscription_id == ack.subscription_id + && p.message.sequence_number == ack.sequence_number + }) + { + // This is potentially innefficient, but this is probably fine due to two factors: + // - we need unordered removal, _and_ ordered removal, which means we need to deal + // with this anyway. + // - The queue is likely to be short, and the element to be removed is likely to be the + // first. 
+ self.retransmission_queue.remove(idx); + StatusCode::Good + } else { + StatusCode::BadSequenceNumberUnknown + } + }) + .collect(), + ) + } + + /// Returns the array of available sequence numbers in the retransmission queue for the specified subscription + pub(super) fn available_sequence_numbers(&self, subscription_id: u32) -> Option> { + if self.retransmission_queue.is_empty() { + return None; + } + // Find the notifications matching this subscription id in the retransmission queue + let sequence_numbers: Vec = self + .retransmission_queue + .iter() + .filter(|&k| k.subscription_id == subscription_id) + .map(|k| k.message.sequence_number) + .collect(); + if sequence_numbers.is_empty() { + None + } else { + Some(sequence_numbers) + } + } + + pub(super) fn notify_data_changes(&mut self, values: Vec<(MonitoredItemHandle, DataValue)>) { + for (handle, value) in values { + let Some(sub) = self.subscriptions.get_mut(&handle.subscription_id) else { + continue; + }; + sub.notify_data_value(&handle.monitored_item_id, value); + } + } + + pub(super) fn notify_events(&mut self, events: Vec<(MonitoredItemHandle, &dyn Event)>) { + for (handle, event) in events { + let Some(sub) = self.subscriptions.get_mut(&handle.subscription_id) else { + continue; + }; + sub.notify_event(&handle.monitored_item_id, event); + } + } + + pub(super) fn user_token(&self) -> &PersistentSessionKey { + &self.user_token + } + + pub(super) fn get_monitored_item_count(&self, subscription_id: u32) -> Option { + self.subscriptions.get(&subscription_id).map(|s| s.len()) + } + + /// Get a reference to the session this subscription collection is owned by. 
+ pub fn session(&self) -> &Arc> { + &self.session + } +} diff --git a/lib/src/server/subscriptions/subscription.rs b/lib/src/server/subscriptions/subscription.rs index d454cdfe1..f6c13a0f2 100644 --- a/lib/src/server/subscriptions/subscription.rs +++ b/lib/src/server/subscriptions/subscription.rs @@ -1,33 +1,18 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::collections::{BTreeSet, HashMap, VecDeque}; -use std::sync::Arc; - -use crate::sync::*; -use crate::types::{ - service_types::{ - MonitoredItemCreateRequest, MonitoredItemCreateResult, MonitoredItemModifyRequest, - MonitoredItemModifyResult, NotificationMessage, TimestampsToReturn, - }, - status_code::StatusCode, - *, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + time::{Duration, Instant}, }; -use crate::core::handle::Handle; - -use crate::server::{ - address_space::AddressSpace, - constants, - diagnostics::ServerDiagnostics, - state::ServerState, - subscriptions::monitored_item::{MonitoredItem, Notification, TickResult}, +use crate::{ + core::handle::Handle, + server::Event, + types::{DataValue, DateTime, DateTimeUtc, NotificationMessage, StatusCode}, }; -/// The state of the subscription -#[derive(Debug, Copy, Clone, PartialEq, Serialize)] -pub(crate) enum SubscriptionState { +use super::monitored_item::{MonitoredItem, Notification}; + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum SubscriptionState { Closed, Creating, Normal, @@ -35,15 +20,20 @@ pub(crate) enum SubscriptionState { KeepAlive, } +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct MonitoredItemHandle { + pub subscription_id: u32, + pub monitored_item_id: u32, +} + #[derive(Debug)] pub(crate) struct SubscriptionStateParams { pub notifications_available: bool, pub more_notifications: bool, pub publishing_req_queued: bool, - pub publishing_timer_expired: bool, } -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum 
UpdateStateAction { None, // Return a keep alive @@ -56,6 +46,13 @@ pub enum UpdateStateAction { SubscriptionExpired, } +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub(super) enum TickResult { + Expired, + Enqueued, + None, +} + /// This is for debugging purposes. It allows the caller to validate the output state if required. /// /// Values correspond to state table in OPC UA Part 4 5.13.1.2 @@ -81,47 +78,17 @@ pub(crate) enum HandledState { Closed27 = 27, } -/// This is for debugging purposes. It allows the caller to validate the output state if required. #[derive(Debug)] -pub(crate) struct UpdateStateResult { - pub handled_state: HandledState, - pub update_state_action: UpdateStateAction, -} - -impl UpdateStateResult { - pub fn new( - handled_state: HandledState, - update_state_action: UpdateStateAction, - ) -> UpdateStateResult { - UpdateStateResult { - handled_state, - update_state_action, - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq)] -pub(crate) enum TickReason { - ReceivePublishRequest, - TickTimerFired, -} - -#[derive(Debug, Clone, Serialize)] +/// A single subscription maintained by the server. pub struct Subscription { - /// Subscription id - subscription_id: u32, - /// Publishing interval in milliseconds + id: u32, publishing_interval: Duration, - /// The lifetime count reset value max_lifetime_counter: u32, - /// Keep alive count reset value max_keep_alive_counter: u32, - /// Relative priority of the subscription. When more than one subscriptio - /// needs to send notifications the highest priority subscription should - /// be sent first. priority: u8, - /// Map of monitored items monitored_items: HashMap, + /// Monitored items that have seen notifications. 
+ notified_monitored_items: HashSet, /// State of the subscription state: SubscriptionState, /// A value that contains the number of consecutive publishing timer expirations without Client @@ -146,341 +113,400 @@ pub struct Subscription { /// 1 and be sequential - it that doesn't happen the server will panic because something went /// wrong somewhere. last_sequence_number: u32, - // The last monitored item id - next_monitored_item_id: u32, // The time that the subscription interval last fired - last_time_publishing_interval_elapsed: DateTimeUtc, + last_time_publishing_interval_elapsed: Instant, // Currently outstanding notifications to send - #[serde(skip)] notifications: VecDeque, - /// Server diagnostics to track creation / destruction / modification of the subscription - #[serde(skip)] - diagnostics: Arc>, - /// Stops the subscription calling diagnostics on drop - #[serde(skip)] - diagnostics_on_drop: bool, + /// Maximum number of queued notifications. + max_queued_notifications: usize, + /// Maximum number of notifications per publish. 
+ max_notifications_per_publish: usize, } -impl Drop for Subscription { - fn drop(&mut self) { - if self.diagnostics_on_drop { - let mut diagnostics = trace_write_lock!(self.diagnostics); - diagnostics.on_destroy_subscription(self); - } - } +#[derive(Debug, Copy, Clone, PartialEq)] +pub(crate) enum TickReason { + ReceivePublishRequest, + TickTimerFired, } impl Subscription { - pub fn new( - diagnostics: Arc>, - subscription_id: u32, + pub(super) fn new( + id: u32, publishing_enabled: bool, publishing_interval: Duration, lifetime_counter: u32, keep_alive_counter: u32, priority: u8, - ) -> Subscription { - let subscription = Subscription { - subscription_id, + max_queued_notifications: usize, + max_notifications_per_publish: u64, + ) -> Self { + Self { + id, publishing_interval, - priority, - monitored_items: HashMap::with_capacity(constants::DEFAULT_MONITORED_ITEM_CAPACITY), max_lifetime_counter: lifetime_counter, max_keep_alive_counter: keep_alive_counter, + priority, + monitored_items: HashMap::new(), + notified_monitored_items: HashSet::new(), // State variables state: SubscriptionState::Creating, lifetime_counter, keep_alive_counter, first_message_sent: false, - publishing_enabled, resend_data: false, + publishing_enabled, // Counters for new items sequence_number: Handle::new(1), last_sequence_number: 0, - next_monitored_item_id: 1, - last_time_publishing_interval_elapsed: chrono::Utc::now(), - notifications: VecDeque::with_capacity(100), - diagnostics, - diagnostics_on_drop: true, - }; - { - let mut diagnostics = trace_write_lock!(subscription.diagnostics); - diagnostics.on_create_subscription(&subscription); + last_time_publishing_interval_elapsed: Instant::now(), + notifications: VecDeque::new(), + max_queued_notifications, + max_notifications_per_publish: max_notifications_per_publish as usize, } - subscription } - pub(crate) fn ready_to_remove(&self) -> bool { - self.state == SubscriptionState::Closed && self.notifications.is_empty() + /// Get the number 
of monitored items in this subscription. + pub fn len(&self) -> usize { + self.monitored_items.len() } - /// Creates a MonitoredItemCreateResult containing an error code - fn monitored_item_create_error(status_code: StatusCode) -> MonitoredItemCreateResult { - MonitoredItemCreateResult { - status_code, - monitored_item_id: 0, - revised_sampling_interval: 0f64, - revised_queue_size: 0, - filter_result: ExtensionObject::null(), - } + pub(super) fn get_mut(&mut self, id: &u32) -> Option<&mut MonitoredItem> { + self.monitored_items.get_mut(id) } - pub fn monitored_items_len(&self) -> usize { - self.monitored_items.len() + /// Get a reference to a monitored item managed by this subscription. + pub fn get(&self, id: &u32) -> Option<&MonitoredItem> { + self.monitored_items.get(id) } - /// Creates monitored items on the specified subscription, returning the creation results - pub fn create_monitored_items( - &mut self, - server_state: &ServerState, - address_space: &AddressSpace, - now: &DateTimeUtc, - timestamps_to_return: TimestampsToReturn, - items_to_create: &[MonitoredItemCreateRequest], - ) -> Vec { - self.reset_lifetime_counter(); - - // Add items to the subscription if they're not already in its - items_to_create - .iter() - .map(|item_to_create| { - if !address_space.node_exists(&item_to_create.item_to_monitor.node_id) { - Self::monitored_item_create_error(StatusCode::BadNodeIdUnknown) - } else { - // TODO validate the attribute id for the type of node - // TODO validate the index range for the node + /// Return whether the subscription contains the given monitored item ID. 
+ pub fn contains_key(&self, id: &u32) -> bool { + self.monitored_items.contains_key(id) + } - // Create a monitored item, if possible - let monitored_item_id = self.next_monitored_item_id; - match MonitoredItem::new( - now, - monitored_item_id, - timestamps_to_return, - server_state, - item_to_create, - ) { - Ok(monitored_item) => { - if server_state.max_monitored_items_per_sub == 0 - || self.monitored_items.len() - <= server_state.max_monitored_items_per_sub - { - let revised_sampling_interval = monitored_item.sampling_interval(); - let revised_queue_size = monitored_item.queue_size() as u32; - // Validate the filter before registering the item - match monitored_item.validate_filter(address_space) { - Ok(filter_result) => { - // Register the item with the subscription - self.monitored_items - .insert(monitored_item_id, monitored_item); - self.next_monitored_item_id += 1; - MonitoredItemCreateResult { - status_code: StatusCode::Good, - monitored_item_id, - revised_sampling_interval, - revised_queue_size, - filter_result, - } - } - Err(status_code) => { - Self::monitored_item_create_error(status_code) - } - } - } else { - // Number of monitored items exceeds limit per sub - Self::monitored_item_create_error( - StatusCode::BadTooManyMonitoredItems, - ) - } - } - Err(status_code) => Self::monitored_item_create_error(status_code), - } - } - }) - .collect() + pub(super) fn drain<'a>(&'a mut self) -> impl Iterator + 'a { + self.monitored_items.drain() } - /// Modify the specified monitored items, returning a result for each - pub fn modify_monitored_items( - &mut self, - server_state: &ServerState, - address_space: &AddressSpace, - timestamps_to_return: TimestampsToReturn, - items_to_modify: &[MonitoredItemModifyRequest], - ) -> Vec { - self.reset_lifetime_counter(); - items_to_modify - .iter() - .map(|item_to_modify| { - match self - .monitored_items - .get_mut(&item_to_modify.monitored_item_id) - { - Some(monitored_item) => { - // Try to change the monitored item 
according to the modify request - let modify_result = monitored_item.modify( - server_state, - address_space, - timestamps_to_return, - item_to_modify, - ); - match modify_result { - Ok(filter_result) => MonitoredItemModifyResult { - status_code: StatusCode::Good, - revised_sampling_interval: monitored_item.sampling_interval(), - revised_queue_size: monitored_item.queue_size() as u32, - filter_result, - }, - Err(err) => MonitoredItemModifyResult { - status_code: err, - revised_sampling_interval: 0f64, - revised_queue_size: 0, - filter_result: ExtensionObject::null(), - }, - } - } - // Item does not exist - None => MonitoredItemModifyResult { - status_code: StatusCode::BadMonitoredItemIdInvalid, - revised_sampling_interval: 0f64, - revised_queue_size: 0, - filter_result: ExtensionObject::null(), - }, - } - }) - .collect() + pub(super) fn set_resend_data(&mut self) { + self.resend_data = true; } - /// Sets the monitoring mode on one monitored item - pub fn set_monitoring_mode( - &mut self, - monitored_item_id: u32, - monitoring_mode: MonitoringMode, - ) -> StatusCode { - if let Some(monitored_item) = self.monitored_items.get_mut(&monitored_item_id) { - monitored_item.set_monitoring_mode(monitoring_mode); - StatusCode::Good - } else { - StatusCode::BadMonitoredItemIdInvalid - } + pub(super) fn remove(&mut self, id: &u32) -> Option { + self.monitored_items.remove(id) } - /// Delete the specified monitored items (by item id), returning a status code for each - pub fn delete_monitored_items(&mut self, items_to_delete: &[u32]) -> Vec { - self.reset_lifetime_counter(); - items_to_delete - .iter() - .map( - |item_to_delete| match self.monitored_items.remove(item_to_delete) { - Some(_) => StatusCode::Good, - None => StatusCode::BadMonitoredItemIdInvalid, - }, - ) - .collect() + pub(super) fn insert(&mut self, id: u32, item: MonitoredItem) { + self.monitored_items.insert(id, item); + self.notified_monitored_items.insert(id); } - // Returns two vecs representing the server and 
client handles for each monitored item. - // Called from the GetMonitoredItems impl - pub fn get_handles(&self) -> (Vec, Vec) { - let server_handles = self - .monitored_items - .values() - .map(|i| i.monitored_item_id()) - .collect(); - let client_handles = self - .monitored_items - .values() - .map(|i| i.client_handle()) - .collect(); - (server_handles, client_handles) + /// Notify the given monitored item of a new data value. + pub fn notify_data_value(&mut self, id: &u32, value: DataValue) { + if let Some(item) = self.monitored_items.get_mut(id) { + if item.notify_data_value(value) { + self.notified_monitored_items.insert(*id); + } + } } - /// Sets the resend data flag which means the next publish request will receive the latest value - /// of every monitored item whether it has changed in this cycle or not. - pub fn set_resend_data(&mut self) { - self.resend_data = true; + /// Notify the given monitored item of a new event. + pub fn notify_event(&mut self, id: &u32, event: &dyn Event) { + if let Some(item) = self.monitored_items.get_mut(id) { + if item.notify_event(event) { + self.notified_monitored_items.insert(*id); + } + } } /// Tests if the publishing interval has elapsed since the last time this function in which case /// it returns `true` and updates its internal state. 
- fn test_and_set_publishing_interval_elapsed(&mut self, now: &DateTimeUtc) -> bool { + fn test_and_set_publishing_interval_elapsed(&mut self, now: Instant) -> bool { // Look at the last expiration time compared to now and see if it matches // or exceeds the publishing interval - let publishing_interval = super::duration_from_ms(self.publishing_interval); - // TODO unwrap logic needs to change - let elapsed = now - .signed_duration_since(self.last_time_publishing_interval_elapsed) - .to_std() - .unwrap(); - if elapsed >= publishing_interval { - self.last_time_publishing_interval_elapsed = *now; + let elapsed = now - self.last_time_publishing_interval_elapsed; + if elapsed >= self.publishing_interval { + self.last_time_publishing_interval_elapsed = now; true } else { false } } - /// Checks the subscription and monitored items for state change, messages. Returns `true` - /// if there are zero or more notifications waiting to be processed. - pub(crate) fn tick( + fn get_state_transition( + &self, + tick_reason: TickReason, + p: SubscriptionStateParams, + ) -> HandledState { + // The full state transition table from Part 4 5.13.1. + // Note that the exact layout here is written to be as close as possible to the state transition + // table. Avoid changing it to clean it up or remove redundant checks. To make it easier to debug, + // it should be as one-to-one with the original document as possible. 
+ match (self.state, tick_reason) { + (SubscriptionState::Creating, _) => HandledState::Create3, + (SubscriptionState::Normal, TickReason::ReceivePublishRequest) + if self.publishing_enabled || !self.publishing_enabled && !p.more_notifications => + { + HandledState::Normal4 + } + (SubscriptionState::Normal, TickReason::ReceivePublishRequest) + if self.publishing_enabled && p.more_notifications => + { + HandledState::Normal5 + } + (SubscriptionState::Normal, TickReason::TickTimerFired) + if p.publishing_req_queued + && self.publishing_enabled + && p.notifications_available => + { + HandledState::IntervalElapsed6 + } + (SubscriptionState::Normal, TickReason::TickTimerFired) + if p.publishing_req_queued + && !self.first_message_sent + && (!self.publishing_enabled + || self.publishing_enabled && !p.more_notifications) => + { + HandledState::IntervalElapsed7 + } + (SubscriptionState::Normal, TickReason::TickTimerFired) + if !p.publishing_req_queued + && (!self.first_message_sent + || self.publishing_enabled && p.notifications_available) => + { + HandledState::IntervalElapsed8 + } + (SubscriptionState::Normal, TickReason::TickTimerFired) + if self.first_message_sent + && (!self.publishing_enabled + || self.publishing_enabled && !p.more_notifications) => + { + HandledState::IntervalElapsed9 + } + (SubscriptionState::Late, TickReason::ReceivePublishRequest) + if self.publishing_enabled + && (p.notifications_available || p.more_notifications) => + { + HandledState::Late10 + } + (SubscriptionState::Late, TickReason::ReceivePublishRequest) + if !self.publishing_enabled + || self.publishing_enabled + && !p.notifications_available + && !p.more_notifications => + { + HandledState::Late11 + } + // This check is not in the spec, but without it the lifetime counter won't behave properly. + // This is probably an error in the standard. 
+ (SubscriptionState::Late, TickReason::TickTimerFired) if self.lifetime_counter > 1 => { + HandledState::Late12 + } + (SubscriptionState::KeepAlive, TickReason::ReceivePublishRequest) => { + HandledState::KeepAlive13 + } + (SubscriptionState::KeepAlive, TickReason::TickTimerFired) + if self.publishing_enabled + && p.notifications_available + && p.publishing_req_queued => + { + HandledState::KeepAlive14 + } + (SubscriptionState::KeepAlive, TickReason::TickTimerFired) + if p.publishing_req_queued + && self.keep_alive_counter == 1 + && (!self.publishing_enabled + || self.publishing_enabled && !p.notifications_available) => + { + HandledState::KeepAlive15 + } + (SubscriptionState::KeepAlive, TickReason::TickTimerFired) + if self.keep_alive_counter > 1 + && (!self.publishing_enabled + || self.publishing_enabled && !p.notifications_available) => + { + HandledState::KeepAlive16 + } + (SubscriptionState::KeepAlive, TickReason::TickTimerFired) + if !p.publishing_req_queued + && (self.keep_alive_counter == 1 + || self.keep_alive_counter > 1 + && self.publishing_enabled + && p.notifications_available) => + { + HandledState::KeepAlive17 + } + // Late is unreachable in the next state. + ( + SubscriptionState::Normal | SubscriptionState::Late | SubscriptionState::KeepAlive, + TickReason::TickTimerFired, + ) if self.lifetime_counter <= 1 => HandledState::Closed27, + _ => HandledState::None0, + } + } + + fn handle_state_transition(&mut self, transition: HandledState) -> UpdateStateAction { + match transition { + HandledState::None0 => UpdateStateAction::None, + HandledState::Create3 => { + self.state = SubscriptionState::Normal; + self.first_message_sent = false; + UpdateStateAction::SubscriptionCreated + } + HandledState::Normal4 => { + // Publish req queued at session level. 
+ UpdateStateAction::None + } + HandledState::Normal5 => { + self.reset_lifetime_counter(); + UpdateStateAction::ReturnNotifications + } + HandledState::IntervalElapsed6 => { + self.reset_lifetime_counter(); + self.start_publishing_timer(); + self.first_message_sent = true; + UpdateStateAction::ReturnNotifications + } + HandledState::IntervalElapsed7 => { + self.reset_lifetime_counter(); + self.start_publishing_timer(); + self.first_message_sent = true; + UpdateStateAction::ReturnKeepAlive + } + HandledState::IntervalElapsed8 => { + self.start_publishing_timer(); + self.state = SubscriptionState::Late; + UpdateStateAction::None + } + HandledState::IntervalElapsed9 => { + self.start_publishing_timer(); + self.reset_keep_alive_counter(); + self.state = SubscriptionState::KeepAlive; + UpdateStateAction::None + } + HandledState::Late10 => { + self.reset_lifetime_counter(); + self.first_message_sent = true; + self.state = SubscriptionState::Normal; + UpdateStateAction::ReturnNotifications + } + HandledState::Late11 => { + self.reset_lifetime_counter(); + self.first_message_sent = true; + self.state = SubscriptionState::KeepAlive; + UpdateStateAction::ReturnKeepAlive + } + HandledState::Late12 => { + self.start_publishing_timer(); + self.state = SubscriptionState::Late; + UpdateStateAction::None + } + HandledState::KeepAlive13 => { + // No-op, publish req enqueued at session level. 
+ UpdateStateAction::None + } + HandledState::KeepAlive14 => { + self.reset_lifetime_counter(); + self.start_publishing_timer(); + self.first_message_sent = true; + self.state = SubscriptionState::Normal; + UpdateStateAction::ReturnNotifications + } + HandledState::KeepAlive15 => { + self.start_publishing_timer(); + self.reset_keep_alive_counter(); + UpdateStateAction::ReturnKeepAlive + } + HandledState::KeepAlive16 => { + self.start_publishing_timer(); + self.keep_alive_counter -= 1; + UpdateStateAction::None + } + HandledState::KeepAlive17 => { + self.start_publishing_timer(); + self.state = SubscriptionState::Late; + UpdateStateAction::None + } + HandledState::Closed27 => { + self.state = SubscriptionState::Closed; + UpdateStateAction::SubscriptionExpired + } + } + } + + fn notifications_available(&self, resend_data: bool) -> bool { + if !self.notified_monitored_items.is_empty() { + true + } else if resend_data { + self.monitored_items.iter().any(|it| it.1.has_last_value()) + } else { + false + } + } + + pub(super) fn tick( &mut self, now: &DateTimeUtc, - address_space: &AddressSpace, + now_instant: Instant, tick_reason: TickReason, publishing_req_queued: bool, - ) { - // Check if the publishing interval has elapsed. Only checks on the tick timer. + ) -> TickResult { let publishing_interval_elapsed = match tick_reason { TickReason::ReceivePublishRequest => false, TickReason::TickTimerFired => { if self.state == SubscriptionState::Creating { true - } else if self.publishing_interval <= 0f64 { - panic!("Publishing interval should have been revised to min interval") } else { - self.test_and_set_publishing_interval_elapsed(now) + self.test_and_set_publishing_interval_elapsed(now_instant) } } }; - // Do a tick on monitored items. Note that monitored items normally update when the interval - // elapses but they don't have to. So this is called every tick just to catch items with their - // own intervals. 
- - let notification = match self.state { - SubscriptionState::Closed | SubscriptionState::Creating => None, - _ => { - let resend_data = self.resend_data; - self.tick_monitored_items( - now, - address_space, - publishing_interval_elapsed, - resend_data, - ) + // We're not actually doing anything in this case. + if matches!(tick_reason, TickReason::TickTimerFired) && !publishing_interval_elapsed { + return TickResult::None; + } + // First, get the actual state transition we're in. + let transition = self.get_state_transition( + tick_reason, + SubscriptionStateParams { + notifications_available: self.notifications_available(self.resend_data), + more_notifications: self.notifications.len() > 0, + publishing_req_queued, + }, + ); + let action = self.handle_state_transition(transition); + + match action { + UpdateStateAction::None => TickResult::None, + UpdateStateAction::ReturnKeepAlive => { + let notification = NotificationMessage::keep_alive( + self.sequence_number.next(), + DateTime::from(*now), + ); + self.enqueue_notification(notification); + TickResult::Enqueued + } + UpdateStateAction::ReturnNotifications => { + let resend_data = std::mem::take(&mut self.resend_data); + let messages = self.tick_monitored_items(now, resend_data); + for msg in messages { + self.enqueue_notification(msg); + } + TickResult::Enqueued + } + UpdateStateAction::SubscriptionCreated => TickResult::None, + UpdateStateAction::SubscriptionExpired => { + debug!("Subscription status change to closed / timeout"); + self.monitored_items.clear(); + let notification = NotificationMessage::status_change( + self.sequence_number.next(), + DateTime::from(*now), + StatusCode::BadTimeout, + ); + self.enqueue_notification(notification); + TickResult::Expired } - }; - self.resend_data = false; - - let notifications_available = !self.notifications.is_empty() || notification.is_some(); - let more_notifications = self.notifications.len() > 1; - - // If items have changed or subscription interval elapsed 
then we may have notifications - // to send or state to update - if notifications_available || publishing_interval_elapsed || publishing_req_queued { - // Update the internal state of the subscription based on what happened - let update_state_result = self.update_state( - tick_reason, - SubscriptionStateParams { - publishing_req_queued, - notifications_available, - more_notifications, - publishing_timer_expired: publishing_interval_elapsed, - }, - ); - trace!( - "subscription tick - update_state_result = {:?}", - update_state_result - ); - self.handle_state_result(now, update_state_result, notification); } } @@ -497,650 +523,491 @@ impl Subscription { expected_sequence_number, notification.sequence_number ); } + if self.notifications.len() >= self.max_queued_notifications { + warn!("Maximum number of queued notifications exceeded, dropping oldest. Subscription ID: {}", self.id); + self.notifications.pop_front(); + } + // debug!("Enqueuing notification {:?}", notification); self.last_sequence_number = notification.sequence_number; self.notifications.push_back(notification); } - fn handle_state_result( + pub(super) fn take_notification(&mut self) -> Option { + self.notifications.pop_front() + } + + pub(super) fn more_notifications(&self) -> bool { + !self.notifications.is_empty() + } + + pub(super) fn ready_to_remove(&self) -> bool { + self.state == SubscriptionState::Closed && self.notifications.is_empty() + } + + fn handle_triggers( &mut self, now: &DateTimeUtc, - update_state_result: UpdateStateResult, - notification: Option, + triggers: Vec<(u32, u32)>, + notifications: &mut Vec, + messages: &mut Vec, ) { - // Now act on the state's action - match update_state_result.update_state_action { - UpdateStateAction::None => { - if let Some(ref notification) = notification { - // Reset the next sequence number to the discarded notification - let notification_sequence_number = notification.sequence_number; - self.sequence_number.set_next(notification_sequence_number); - 
debug!("Notification message nr {} was being ignored for a do-nothing, update state was {:?}", notification_sequence_number, update_state_result); + for (triggering_item, item_id) in triggers { + let Some(item) = self.monitored_items.get_mut(&item_id) else { + if let Some(item) = self.monitored_items.get_mut(&triggering_item) { + item.remove_dead_trigger(item_id); } - // Send nothing - } - UpdateStateAction::ReturnKeepAlive => { - if let Some(ref notification) = notification { - // Reset the next sequence number to the discarded notification - let notification_sequence_number = notification.sequence_number; - self.sequence_number.set_next(notification_sequence_number); - debug!("Notification message nr {} was being ignored for a keep alive, update state was {:?}", notification_sequence_number, update_state_result); - } - // Send a keep alive - debug!("Sending keep alive response"); - let notification = NotificationMessage::keep_alive( - self.sequence_number.next(), - DateTime::from(*now), - ); - self.enqueue_notification(notification); - } - UpdateStateAction::ReturnNotifications => { - // Add the notification message to the queue - if let Some(notification) = notification { - self.enqueue_notification(notification); - } - } - UpdateStateAction::SubscriptionCreated => { - if notification.is_some() { - panic!("SubscriptionCreated got a notification"); - } - // Subscription was created successfully - // let notification = NotificationMessage::status_change(self.sequence_number.next(), DateTime::from(now.clone()), StatusCode::Good); - // self.enqueue_notification(notification); - } - UpdateStateAction::SubscriptionExpired => { - if notification.is_some() { - panic!("SubscriptionExpired got a notification"); + continue; + }; + + while let Some(notif) = item.pop_notification() { + notifications.push(notif); + if notifications.len() >= self.max_notifications_per_publish + && self.max_notifications_per_publish > 0 + { + messages.push(Self::make_notification_message( + 
self.sequence_number.next(), + std::mem::take(notifications), + now, + )); } - // Delete the monitored items, issue a status change for the subscription - debug!("Subscription status change to closed / timeout"); - self.monitored_items.clear(); - let notification = NotificationMessage::status_change( - self.sequence_number.next(), - DateTime::from(*now), - StatusCode::BadTimeout, - ); - self.enqueue_notification(notification); } } } - pub(crate) fn take_notification(&mut self) -> Option { - self.notifications.pop_front() - } - - // See OPC UA Part 4 5.13.1.2 State Table - // - // This function implements the main guts of updating the subscription's state according to - // some input events and its existing internal state. - // - // Calls to the function will update the internal state of and return a tuple with any required - // actions. - // - // Note that some state events are handled outside of update_state. e.g. the subscription - // is created elsewhere which handles states 1, 2 and 3. - // - // Inputs: - // - // * publish_request - an optional publish request. May be used by subscription to remove acknowledged notifications - // * publishing_interval_elapsed - true if the publishing interval has elapsed - // - // Returns in order: - // - // * State id that handled this call. 
Useful for debugging which state handler triggered - // * Update state action - none, return notifications, return keep alive - // * Publishing request action - nothing, dequeue - // - pub(crate) fn update_state( - &mut self, - tick_reason: TickReason, - p: SubscriptionStateParams, - ) -> UpdateStateResult { - // This function is called when a publish request is received OR the timer expired, so getting - // both is invalid code somewhere - if tick_reason == TickReason::ReceivePublishRequest && p.publishing_timer_expired { - panic!("Should not be possible for timer to have expired and received publish request at same time") - } - - // Extra state debugging - { - use log::Level::Trace; - if log_enabled!(Trace) { - trace!( - r#"State inputs: - subscription_id: {} / state: {:?} - tick_reason: {:?} / state_params: {:?} - publishing_enabled: {} - keep_alive_counter / lifetime_counter: {} / {} - message_sent: {}"#, - self.subscription_id, - self.state, - tick_reason, - p, - self.publishing_enabled, - self.keep_alive_counter, - self.lifetime_counter, - self.first_message_sent - ); + fn make_notification_message( + next_sequence_number: u32, + notifications: Vec, + now: &DateTimeUtc, + ) -> NotificationMessage { + let mut data_change_notifications = Vec::new(); + let mut event_notifications = Vec::new(); + + for notif in notifications { + match notif { + Notification::MonitoredItemNotification(n) => data_change_notifications.push(n), + Notification::Event(n) => event_notifications.push(n), } } - // This is a state engine derived from OPC UA Part 4 Publish service and might look a - // little odd for that. - // - // Note in some cases, some of the actions have already happened outside of this function. - // For example, publish requests are already queued before we come in here and this function - // uses what its given. 
Likewise, this function does not "send" notifications, rather - // it returns them (if any) and it is up to the caller to send them - - // more state tests that match on more than one state - match self.state { - SubscriptionState::Normal | SubscriptionState::Late | SubscriptionState::KeepAlive => { - if self.lifetime_counter == 1 { - // State #27 - self.state = SubscriptionState::Closed; - return UpdateStateResult::new( - HandledState::Closed27, - UpdateStateAction::SubscriptionExpired, - ); - } - } - _ => { - // DO NOTHING - } + NotificationMessage::data_change( + next_sequence_number, + DateTime::from(*now), + data_change_notifications, + event_notifications, + ) + } + + fn tick_monitored_item( + monitored_item: &mut MonitoredItem, + now: &DateTimeUtc, + resend_data: bool, + max_notifications: usize, + triggers: &mut Vec<(u32, u32)>, + notifications: &mut Vec, + messages: &mut Vec, + sequence_numbers: &mut Handle, + ) { + if monitored_item.is_sampling() && monitored_item.has_new_notifications() { + triggers.extend( + monitored_item + .triggered_items() + .iter() + .copied() + .map(|id| (monitored_item.id(), id)), + ); } - match self.state { - SubscriptionState::Creating => { - // State #2 - // CreateSubscription fails, return negative response - // Handled in message handler - // State #3 - self.state = SubscriptionState::Normal; - self.first_message_sent = false; - return UpdateStateResult::new( - HandledState::Create3, - UpdateStateAction::SubscriptionCreated, - ); - } - SubscriptionState::Normal => { - if tick_reason == TickReason::ReceivePublishRequest - && (!self.publishing_enabled - || (self.publishing_enabled && !p.more_notifications)) - { - // State #4 - return UpdateStateResult::new(HandledState::Normal4, UpdateStateAction::None); - } else if tick_reason == TickReason::ReceivePublishRequest - && self.publishing_enabled - && p.more_notifications - { - // State #5 - self.reset_lifetime_counter(); - self.first_message_sent = true; - return 
UpdateStateResult::new( - HandledState::Normal5, - UpdateStateAction::ReturnNotifications, - ); - } else if p.publishing_timer_expired - && p.publishing_req_queued - && self.publishing_enabled - && p.notifications_available - { - // State #6 - self.reset_lifetime_counter(); - self.start_publishing_timer(); - self.first_message_sent = true; - return UpdateStateResult::new( - HandledState::IntervalElapsed6, - UpdateStateAction::ReturnNotifications, - ); - } else if p.publishing_timer_expired - && p.publishing_req_queued - && !self.first_message_sent - && (!self.publishing_enabled - || (self.publishing_enabled && !p.notifications_available)) - { - // State #7 - self.reset_lifetime_counter(); - self.start_publishing_timer(); - self.first_message_sent = true; - return UpdateStateResult::new( - HandledState::IntervalElapsed7, - UpdateStateAction::ReturnKeepAlive, - ); - } else if p.publishing_timer_expired - && !p.publishing_req_queued - && (!self.first_message_sent - || (self.publishing_enabled && p.notifications_available)) - { - // State #8 - self.start_publishing_timer(); - self.state = SubscriptionState::Late; - return UpdateStateResult::new( - HandledState::IntervalElapsed8, - UpdateStateAction::None, - ); - } else if p.publishing_timer_expired - && self.first_message_sent - && (!self.publishing_enabled - || (self.publishing_enabled && !p.notifications_available)) - { - // State #9 - self.start_publishing_timer(); - self.reset_keep_alive_counter(); - self.state = SubscriptionState::KeepAlive; - return UpdateStateResult::new( - HandledState::IntervalElapsed9, - UpdateStateAction::None, - ); - } + if monitored_item.is_reporting() { + if resend_data { + monitored_item.add_current_value_to_queue(); } - SubscriptionState::Late => { - if tick_reason == TickReason::ReceivePublishRequest - && self.publishing_enabled - && (p.notifications_available || p.more_notifications) - { - // State #10 - self.reset_lifetime_counter(); - self.state = SubscriptionState::Normal; - 
self.first_message_sent = true; - return UpdateStateResult::new( - HandledState::Late10, - UpdateStateAction::ReturnNotifications, - ); - } else if tick_reason == TickReason::ReceivePublishRequest - && (!self.publishing_enabled - || (self.publishing_enabled - && !p.notifications_available - && !p.more_notifications)) - { - // State #11 - self.reset_lifetime_counter(); - self.state = SubscriptionState::KeepAlive; - self.first_message_sent = true; - return UpdateStateResult::new( - HandledState::Late11, - UpdateStateAction::ReturnKeepAlive, - ); - } else if p.publishing_timer_expired { - // State #12 - self.start_publishing_timer(); - return UpdateStateResult::new(HandledState::Late12, UpdateStateAction::None); - } - } - SubscriptionState::KeepAlive => { - if tick_reason == TickReason::ReceivePublishRequest { - // State #13 - return UpdateStateResult::new( - HandledState::KeepAlive13, - UpdateStateAction::None, - ); - } else if p.publishing_timer_expired - && self.publishing_enabled - && p.notifications_available - && p.publishing_req_queued - { - // State #14 - self.first_message_sent = true; - self.state = SubscriptionState::Normal; - return UpdateStateResult::new( - HandledState::KeepAlive14, - UpdateStateAction::ReturnNotifications, - ); - } else if p.publishing_timer_expired - && p.publishing_req_queued - && self.keep_alive_counter == 1 - && (!self.publishing_enabled - || (self.publishing_enabled && p.notifications_available)) - { - // State #15 - self.start_publishing_timer(); - self.reset_keep_alive_counter(); - return UpdateStateResult::new( - HandledState::KeepAlive15, - UpdateStateAction::ReturnKeepAlive, - ); - } else if p.publishing_timer_expired - && self.keep_alive_counter > 1 - && (!self.publishing_enabled - || (self.publishing_enabled && !p.notifications_available)) - { - // State #16 - self.start_publishing_timer(); - self.keep_alive_counter -= 1; - return UpdateStateResult::new( - HandledState::KeepAlive16, - UpdateStateAction::None, - ); - } else 
if p.publishing_timer_expired - && !p.publishing_req_queued - && (self.keep_alive_counter == 1 - || (self.keep_alive_counter > 1 - && self.publishing_enabled - && p.notifications_available)) - { - // State #17 - self.start_publishing_timer(); - self.state = SubscriptionState::Late; - return UpdateStateResult::new( - HandledState::KeepAlive17, - UpdateStateAction::None, - ); + if monitored_item.has_notifications() { + while let Some(notif) = monitored_item.pop_notification() { + notifications.push(notif); + if notifications.len() >= max_notifications && max_notifications > 0 { + messages.push(Self::make_notification_message( + sequence_numbers.next(), + std::mem::take(notifications), + now, + )); + } } } - _ => { - // DO NOTHING - } } - - UpdateStateResult::new(HandledState::None0, UpdateStateAction::None) } - /// Iterate through the monitored items belonging to the subscription, calling tick on each in turn. - /// - /// Items that are in a reporting state, or triggered to report will be have their pending notifications - /// collected together when the publish interval elapsed flag is `true`. - /// - /// The function returns a `notifications` and a `more_notifications` boolean to indicate if the notifications - /// are available. 
fn tick_monitored_items( &mut self, now: &DateTimeUtc, - address_space: &AddressSpace, - publishing_interval_elapsed: bool, resend_data: bool, - ) -> Option { - let mut triggered_items: BTreeSet = BTreeSet::new(); - let mut monitored_item_notifications = Vec::with_capacity(self.monitored_items.len() * 2); - - for monitored_item in self.monitored_items.values_mut() { - // If this returns true then the monitored item wants to report its notification - let monitoring_mode = monitored_item.monitoring_mode(); - match monitored_item.tick(now, address_space, publishing_interval_elapsed, resend_data) - { - TickResult::ReportValueChanged => { - if publishing_interval_elapsed { - // If this monitored item has triggered items, then they need to be handled - match monitoring_mode { - MonitoringMode::Reporting => { - // From triggering docs - // If the monitoring mode of the triggering item is REPORTING, then it is reported when the - // triggering item triggers the items to report. - monitored_item.triggered_items().iter().for_each(|i| { - triggered_items.insert(*i); - }) - } - _ => { - // Sampling should have gone in the other branch. Disabled shouldn't do anything. - panic!("How can there be changes to report when monitored item is in this monitoring mode {:?}", monitoring_mode); - } - } - // Take some / all of the monitored item's pending notifications - if let Some(mut item_notification_messages) = - monitored_item.all_notifications() - { - monitored_item_notifications.append(&mut item_notification_messages); - } - } - } - TickResult::ValueChanged => { - // The monitored item doesn't have changes to report but its value did change so it - // is still necessary to check its triggered items. - if publishing_interval_elapsed { - match monitoring_mode { - MonitoringMode::Sampling => { - // If the monitoring mode of the triggering item is SAMPLING, then it is not reported when the - // triggering item triggers the items to report. 
- monitored_item.triggered_items().iter().for_each(|i| { - triggered_items.insert(*i); - }) - } - _ => { - // Reporting should have gone in the other branch. Disabled shouldn't do anything. - panic!("How can there be a value change when the mode is not sampling?"); - } - } - } - } - TickResult::NoChange => { - // Ignore - } + ) -> Vec { + let mut notifications = Vec::new(); + let mut messages = Vec::new(); + let mut triggers = Vec::new(); + + // If resend data is true, we must visit ever monitored item + if resend_data { + for monitored_item in self.monitored_items.values_mut() { + Self::tick_monitored_item( + monitored_item, + now, + resend_data, + self.max_notifications_per_publish, + &mut triggers, + &mut notifications, + &mut messages, + &mut self.sequence_number, + ); } - } - - // Are there any triggered items to force a change on? - triggered_items.iter().for_each(|i| { - if let Some(ref mut monitored_item) = self.monitored_items.get_mut(i) { - // Check the monitoring mode of the item to report - match monitored_item.monitoring_mode() { - MonitoringMode::Sampling => { - // If the monitoring mode of the item to report is SAMPLING, then it is reported when the - // triggering item triggers the i tems to report. - // - // Call with the resend_data flag as true to force the monitored item to - monitored_item.check_value(address_space, now, true); - if let Some(mut notifications) = monitored_item.all_notifications() { - monitored_item_notifications.append(&mut notifications); - } - } - MonitoringMode::Reporting => { - // If the monitoring mode of the item to report is REPORTING, this effectively causes the - // triggering item to be ignored. All notifications of the items to report are sent after the - // publishing interval expires. - // - // DO NOTHING - } - MonitoringMode::Disabled => { - // DO NOTHING - } - } - } else { - // It is possible that a monitored item contains a triggered id which has been deleted, so silently - // ignore that case. 
+ } else { + for item_id in self.notified_monitored_items.drain() { + let Some(monitored_item) = self.monitored_items.get_mut(&item_id) else { + continue; + }; + Self::tick_monitored_item( + monitored_item, + now, + resend_data, + self.max_notifications_per_publish, + &mut triggers, + &mut notifications, + &mut messages, + &mut self.sequence_number, + ); } - }); - - // Produce a data change notification - if !monitored_item_notifications.is_empty() { - let next_sequence_number = self.sequence_number.next(); + } - trace!( - "Create notification for subscription {}, sequence number {}", - self.subscription_id, - next_sequence_number - ); + self.handle_triggers(now, triggers, &mut notifications, &mut messages); - // Collect all datachange notifications - let data_change_notifications = monitored_item_notifications - .iter() - .filter(|v| matches!(v, Notification::MonitoredItemNotification(_))) - .map(|v| { - if let Notification::MonitoredItemNotification(v) = v { - v.clone() - } else { - panic!() - } - }) - .collect(); - - // Collect event notifications - let event_notifications = monitored_item_notifications - .iter() - .filter(|v| matches!(v, Notification::Event(_))) - .map(|v| { - if let Notification::Event(v) = v { - v.clone() - } else { - panic!() - } - }) - .collect(); - - // Make a notification - let notification = NotificationMessage::data_change( - next_sequence_number, - DateTime::from(*now), - data_change_notifications, - event_notifications, - ); - Some(notification) - } else { - None + if notifications.len() > 0 { + messages.push(Self::make_notification_message( + self.sequence_number.next(), + notifications, + now, + )); } + + messages } /// Reset the keep-alive counter to the maximum keep-alive count of the Subscription. 
/// The maximum keep-alive count is set by the Client when the Subscription is created /// and may be modified using the ModifySubscription Service - pub fn reset_keep_alive_counter(&mut self) { + pub(super) fn reset_keep_alive_counter(&mut self) { self.keep_alive_counter = self.max_keep_alive_counter; } /// Reset the lifetime counter to the value specified for the life time of the subscription /// in the create subscription service - pub fn reset_lifetime_counter(&mut self) { + pub(super) fn reset_lifetime_counter(&mut self) { self.lifetime_counter = self.max_lifetime_counter; } /// Start or restart the publishing timer and decrement the LifetimeCounter Variable. - pub fn start_publishing_timer(&mut self) { + pub(super) fn start_publishing_timer(&mut self) { self.lifetime_counter -= 1; trace!("Decrementing life time counter {}", self.lifetime_counter); } - pub fn subscription_id(&self) -> u32 { - self.subscription_id + /// The ID of this subscription. + pub fn id(&self) -> u32 { + self.id } - pub fn lifetime_counter(&self) -> u32 { - self.lifetime_counter - } - - #[cfg(test)] - pub(crate) fn set_current_lifetime_count(&mut self, current_lifetime_count: u32) { - self.lifetime_counter = current_lifetime_count; + /// The priority of this subscription. 
+ pub fn priority(&self) -> u8 { + self.priority } - pub fn keep_alive_counter(&self) -> u32 { - self.keep_alive_counter + pub(super) fn set_publishing_interval(&mut self, publishing_interval: Duration) { + self.publishing_interval = publishing_interval; + self.reset_lifetime_counter(); } - #[cfg(test)] - pub(crate) fn set_keep_alive_counter(&mut self, keep_alive_counter: u32) { - self.keep_alive_counter = keep_alive_counter; + pub(super) fn set_max_lifetime_counter(&mut self, max_lifetime_counter: u32) { + self.max_lifetime_counter = max_lifetime_counter; } - #[cfg(test)] - pub(crate) fn state(&self) -> SubscriptionState { - self.state + pub(super) fn set_max_keep_alive_counter(&mut self, max_keep_alive_counter: u32) { + self.max_keep_alive_counter = max_keep_alive_counter; } - #[cfg(test)] - pub(crate) fn set_state(&mut self, state: SubscriptionState) { - self.state = state; + pub(super) fn set_priority(&mut self, priority: u8) { + self.priority = priority; } - pub fn message_sent(&self) -> bool { - self.first_message_sent + pub(super) fn set_max_notifications_per_publish(&mut self, max_notifications_per_publish: u64) { + self.max_notifications_per_publish = max_notifications_per_publish as usize; } - #[cfg(test)] - pub(crate) fn set_message_sent(&mut self, message_sent: bool) { - self.first_message_sent = message_sent; + pub(super) fn set_publishing_enabled(&mut self, publishing_enabled: bool) { + self.publishing_enabled = publishing_enabled; } + /// The publishing interval of this subscription. pub fn publishing_interval(&self) -> Duration { self.publishing_interval } - pub(crate) fn set_publishing_interval(&mut self, publishing_interval: Duration) { - self.publishing_interval = publishing_interval; - self.reset_lifetime_counter(); - } - - pub fn max_keep_alive_count(&self) -> u32 { - self.max_keep_alive_counter + /// Whether publishing is enabled on this subscription. 
+ pub fn publishing_enabled(&self) -> bool { + self.publishing_enabled } - pub(crate) fn set_max_keep_alive_count(&mut self, max_keep_alive_count: u32) { - self.max_keep_alive_counter = max_keep_alive_count; + /// The maximum number of notification messages queued for this subscription. + pub fn max_queued_notifications(&self) -> usize { + self.max_queued_notifications } - pub fn max_lifetime_count(&self) -> u32 { - self.max_lifetime_counter - } - - pub(crate) fn set_max_lifetime_count(&mut self, max_lifetime_count: u32) { - self.max_lifetime_counter = max_lifetime_count; - } - - pub fn priority(&self) -> u8 { - self.priority + /// The maximum number of notifications per notification message for this + /// subscription. + pub fn max_notifications_per_publish(&self) -> usize { + self.max_notifications_per_publish } - pub(crate) fn set_priority(&mut self, priority: u8) { - self.priority = priority; - } - - pub(crate) fn set_publishing_enabled(&mut self, publishing_enabled: bool) { - self.publishing_enabled = publishing_enabled; - self.reset_lifetime_counter(); - } - - pub(crate) fn set_diagnostics_on_drop(&mut self, diagnostics_on_drop: bool) { - self.diagnostics_on_drop = diagnostics_on_drop; + /// The current state of the subscription. 
+ pub fn state(&self) -> SubscriptionState { + self.state } +} - fn validate_triggered_items( - &self, - monitored_item_id: u32, - items: &[u32], - ) -> (Vec, Vec) { - // Monitored items can only trigger on other items in the subscription that exist - let is_good_monitored_item = - |i| self.monitored_items.contains_key(i) && *i != monitored_item_id; - let is_good_monitored_item_result = |i| { - if is_good_monitored_item(i) { - StatusCode::Good - } else { - StatusCode::BadMonitoredItemIdInvalid +#[cfg(test)] +mod tests { + use std::time::{Duration, Instant}; + + use chrono::Utc; + + use crate::{ + server::{ + subscriptions::monitored_item::{tests::new_monitored_item, FilterType, Notification}, + SubscriptionState, + }, + types::{ + AttributeId, DataChangeNotification, DataValue, DateTime, DateTimeUtc, DecodingOptions, + EventNotificationList, MonitoringMode, NodeId, NotificationMessage, ObjectId, + ReadValueId, StatusChangeNotification, StatusCode, Variant, + }, + }; + + use super::{Subscription, TickReason}; + + fn get_notifications(message: &NotificationMessage) -> Vec { + let mut res = Vec::new(); + for it in message.notification_data.iter().flatten() { + match it.node_id.as_object_id().unwrap() { + ObjectId::DataChangeNotification_Encoding_DefaultBinary => { + let notif = it + .decode_inner::(&DecodingOptions::test()) + .unwrap(); + for n in notif.monitored_items.into_iter().flatten() { + res.push(Notification::MonitoredItemNotification(n)); + } + } + ObjectId::EventNotificationList_Encoding_DefaultBinary => { + let notif = it + .decode_inner::(&DecodingOptions::test()) + .unwrap(); + for n in notif.events.into_iter().flatten() { + res.push(Notification::Event(n)); + } + } + _ => panic!("Wrong message type"), } + } + res + } + + fn offset(time: DateTimeUtc, time_inst: Instant, ms: u64) -> (DateTimeUtc, Instant) { + ( + time + chrono::Duration::try_milliseconds(ms as i64).unwrap(), + time_inst + Duration::from_millis(ms), + ) + } + + #[test] + fn tick() { + let 
mut sub = Subscription::new(1, true, Duration::from_millis(100), 100, 20, 1, 100, 1000); + let start = Instant::now(); + let start_dt = Utc::now(); + + sub.last_time_publishing_interval_elapsed = start; + + // Subscription is creating, handle the first tick. + assert_eq!(sub.state, SubscriptionState::Creating); + sub.tick(&start_dt, start, TickReason::TickTimerFired, true); + assert_eq!(sub.state, SubscriptionState::Normal); + assert!(!sub.first_message_sent); + + // Tick again before the publishing interval has elapsed, should change nothing. + sub.tick(&start_dt, start, TickReason::TickTimerFired, true); + assert_eq!(sub.state, SubscriptionState::Normal); + assert!(!sub.first_message_sent); + + // Add a monitored item + sub.insert( + 1, + new_monitored_item( + 1, + ReadValueId { + node_id: NodeId::null(), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + MonitoringMode::Reporting, + FilterType::None, + 100.0, + false, + Some(DataValue::new_now(123)), + ), + ); + // New tick at next publishing interval should produce something + let (time, time_inst) = offset(start_dt, start, 100); + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + assert_eq!(sub.state, SubscriptionState::Normal); + assert!(sub.first_message_sent); + let notif = sub.take_notification().unwrap(); + let its = get_notifications(¬if); + assert_eq!(its.len(), 1); + let Notification::MonitoredItemNotification(m) = &its[0] else { + panic!("Wrong notification type"); }; + assert_eq!(m.value.value, Some(Variant::Int32(123))); + + // Next tick produces nothing + let (time, time_inst) = offset(start_dt, start, 200); + + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + // State transitions to keep alive due to empty publish. 
+ assert_eq!(sub.state, SubscriptionState::KeepAlive); + assert_eq!(sub.lifetime_counter, 98); + assert!(sub.first_message_sent); + assert!(sub.take_notification().is_none()); + + // Enqueue a new notification + sub.notify_data_value( + &1, + DataValue::new_at( + 321, + DateTime::from(start_dt + chrono::Duration::try_milliseconds(300).unwrap()), + ), + ); + let (time, time_inst) = offset(start_dt, start, 300); + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + // State transitions back to normal. + assert_eq!(sub.state, SubscriptionState::Normal); + assert!(sub.first_message_sent); + assert_eq!(sub.lifetime_counter, 99); + let notif = sub.take_notification().unwrap(); + let its = get_notifications(¬if); + assert_eq!(its.len(), 1); + let Notification::MonitoredItemNotification(m) = &its[0] else { + panic!("Wrong notification type"); + }; + assert_eq!(m.value.value, Some(Variant::Int32(321))); + + for i in 0..20 { + let (time, time_inst) = offset(start_dt, start, 1000 + i * 100); + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + assert_eq!(sub.state, SubscriptionState::KeepAlive); + assert_eq!(sub.lifetime_counter, (99 - i - 1) as u32); + assert_eq!(sub.keep_alive_counter, (20 - i) as u32); + assert!(sub.take_notification().is_none()); + } + assert_eq!(sub.lifetime_counter, 79); + assert_eq!(sub.keep_alive_counter, 1); + + // Tick one more time to get a keep alive + let (time, time_inst) = offset(start_dt, start, 3000); + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + assert_eq!(sub.state, SubscriptionState::KeepAlive); + assert_eq!(sub.lifetime_counter, 78); + assert_eq!(sub.keep_alive_counter, 20); + let notif = sub.take_notification().unwrap(); + let its = get_notifications(¬if); + assert!(its.is_empty()); + + // Tick another 20 times to become late + for i in 0..19 { + let (time, time_inst) = offset(start_dt, start, 3100 + i * 100); + sub.tick(&time, time_inst, TickReason::TickTimerFired, false); + 
assert_eq!(sub.state, SubscriptionState::KeepAlive); + assert_eq!(sub.lifetime_counter, (78 - i - 1) as u32); + } - // Find monitored items that do or do not exist - let results: Vec = items.iter().map(is_good_monitored_item_result).collect(); - let items: Vec = items - .iter() - .filter(|i| is_good_monitored_item(i)) - .copied() - .collect(); - - (results, items) - } - - /// Sets the triggering monitored items on a subscription. This function will validate that - /// the items to add / remove actually exist and will only pass through existing monitored items - /// onto the monitored item itself. - pub(crate) fn set_triggering( - &mut self, - monitored_item_id: u32, - items_to_add: &[u32], - items_to_remove: &[u32], - ) -> Result<(Vec, Vec), StatusCode> { - // Find monitored items that do or do not exist - let (add_results, items_to_add) = - self.validate_triggered_items(monitored_item_id, items_to_add); - let (remove_results, items_to_remove) = - self.validate_triggered_items(monitored_item_id, items_to_remove); - - if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&monitored_item_id) { - // Set the triggering monitored items - monitored_item.set_triggering(items_to_add.as_slice(), items_to_remove.as_slice()); - - Ok((add_results, remove_results)) - } else { - // This monitored item is unrecognized - Err(StatusCode::BadMonitoredItemIdInvalid) + // Tick another 58 times to expire + for i in 0..58 { + let (time, time_inst) = offset(start_dt, start, 5100 + i * 100); + sub.tick(&time, time_inst, TickReason::TickTimerFired, false); + assert_eq!(sub.state, SubscriptionState::Late); + assert_eq!(sub.lifetime_counter, (58 - i) as u32); + } + assert_eq!(sub.lifetime_counter, 1); + + let (time, time_inst) = offset(start_dt, start, 20000); + sub.tick(&time, time_inst, TickReason::TickTimerFired, false); + assert_eq!(sub.state, SubscriptionState::Closed); + let notif = sub.take_notification().unwrap(); + assert_eq!(1, 
notif.notification_data.as_ref().unwrap().len()); + let status_change = notif.notification_data.as_ref().unwrap()[0] + .decode_inner::(&DecodingOptions::test()) + .unwrap(); + assert_eq!(status_change.status, StatusCode::BadTimeout); + } + + #[test] + fn monitored_item_triggers() { + let mut sub = Subscription::new(1, true, Duration::from_millis(100), 100, 20, 1, 100, 1000); + let start = Instant::now(); + let start_dt = Utc::now(); + + sub.last_time_publishing_interval_elapsed = start; + for i in 0..4 { + sub.insert( + i + 1, + new_monitored_item( + i + 1, + ReadValueId { + node_id: NodeId::null(), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + if i == 0 { + MonitoringMode::Reporting + } else if i == 3 { + MonitoringMode::Disabled + } else { + MonitoringMode::Sampling + }, + FilterType::None, + 100.0, + false, + Some(DataValue::new_at(0, start_dt.into())), + ), + ); + } + sub.get_mut(&1).unwrap().set_triggering(&[1, 2, 3, 4], &[]); + // Notify the two sampling items and the disabled item + let (time, time_inst) = offset(start_dt, start, 100); + sub.notify_data_value(&2, DataValue::new_at(1, time.into())); + sub.notify_data_value(&3, DataValue::new_at(1, time.into())); + sub.notify_data_value(&4, DataValue::new_at(1, time.into())); + + // Should not cause a notification + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + assert!(sub.take_notification().is_none()); + + // Notify the first item + sub.notify_data_value(&1, DataValue::new_at(1, time.into())); + let (time, time_inst) = offset(start_dt, start, 200); + sub.tick(&time, time_inst, TickReason::TickTimerFired, true); + let notif = sub.take_notification().unwrap(); + let its = get_notifications(¬if); + assert_eq!(its.len(), 6); + for it in its { + let Notification::MonitoredItemNotification(_m) = it else { + panic!("Wrong notification type"); + }; } } } diff --git a/lib/src/server/subscriptions/subscriptions.rs b/lib/src/server/subscriptions/subscriptions.rs deleted file 
mode 100644 index ffa704e25..000000000 --- a/lib/src/server/subscriptions/subscriptions.rs +++ /dev/null @@ -1,502 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{ - collections::{BTreeMap, VecDeque}, - time::Duration, -}; - -use crate::types::{ - service_types::{NotificationMessage, PublishRequest, PublishResponse, ServiceFault}, - status_code::StatusCode, - *, -}; - -use crate::server::{ - address_space::types::AddressSpace, - subscriptions::{ - subscription::{Subscription, TickReason}, - PublishRequestEntry, PublishResponseEntry, - }, -}; - -/// The `Subscriptions` manages zero or more subscriptions, pairing publish requests coming from -/// the client with notifications coming from the subscriptions. Therefore the subscriptions has -/// an incoming queue of publish requests and an outgoing queue of publish responses. The transport -/// layer adds to the one and removes from the other. -/// -/// Subscriptions are processed inside `tick()` which is called periodically from a timer. Each -/// tick produces notifications which are ready to publish via a transmission queue. Once a -/// notification is published, it is held in a retransmission queue until it is acknowledged by the -/// client, or purged. -pub(crate) struct Subscriptions { - /// The publish request queue (requests by the client on the session) - publish_request_queue: VecDeque, - /// The publish response queue arranged oldest to latest - publish_response_queue: VecDeque, - // Timeout period for requests in ms - publish_request_timeout: i64, - /// Subscriptions associated with the session - subscriptions: BTreeMap, - // Notifications waiting to be sent - Value is subscription id and notification message. - transmission_queue: VecDeque<(u32, PublishRequestEntry, NotificationMessage)>, - // Notifications that have been sent but have yet to be acknowledged (retransmission queue). - // Key is (subscription_id, sequence_number). 
Value is notification message. - retransmission_queue: BTreeMap<(u32, u32), NotificationMessage>, -} - -#[derive(Serialize)] -pub struct Metrics { - pub subscriptions: Vec, - pub publish_request_queue_len: usize, - pub publish_response_queue_len: usize, - pub transmission_queue_len: usize, - pub retransmission_queue_len: usize, -} - -impl Subscriptions { - pub fn new(max_subscriptions: usize, publish_request_timeout: i64) -> Subscriptions { - let max_publish_requests = if max_subscriptions > 0 { - 2 * max_subscriptions - } else { - 100 - }; - Subscriptions { - publish_request_queue: VecDeque::with_capacity(max_publish_requests), - publish_response_queue: VecDeque::with_capacity(max_publish_requests), - publish_request_timeout, - subscriptions: BTreeMap::new(), - transmission_queue: VecDeque::with_capacity(max_publish_requests), - retransmission_queue: BTreeMap::new(), - } - } - - pub(crate) fn metrics(&self) -> Metrics { - // Subscriptions - let subscriptions = self - .subscriptions() - .iter() - .map(|subscription_pair| { - let mut subscription = subscription_pair.1.clone(); - subscription.set_diagnostics_on_drop(false); - subscription - }) - .collect(); - Metrics { - subscriptions, - publish_request_queue_len: self.publish_request_queue.len(), - publish_response_queue_len: self.publish_response_queue.len(), - transmission_queue_len: self.transmission_queue.len(), - retransmission_queue_len: self.retransmission_queue.len(), - } - } - - #[cfg(test)] - pub(crate) fn publish_request_queue(&mut self) -> &mut VecDeque { - &mut self.publish_request_queue - } - - #[cfg(test)] - pub(crate) fn publish_response_queue(&mut self) -> &mut VecDeque { - &mut self.publish_response_queue - } - - #[cfg(test)] - pub(crate) fn retransmission_queue( - &mut self, - ) -> &mut BTreeMap<(u32, u32), NotificationMessage> { - &mut self.retransmission_queue - } - - /// Takes the publish responses which are queued for the client and returns them to the caller, - /// or returns None if there 
are none to process. - pub fn take_publish_responses(&mut self) -> Option> { - if self.publish_response_queue.is_empty() { - None - } else { - // Take the publish responses from the subscriptions - let mut publish_responses = VecDeque::with_capacity(self.publish_response_queue.len()); - publish_responses.append(&mut self.publish_response_queue); - Some(publish_responses) - } - } - - /// Returns the number of maxmimum publish requests allowable for the current number of subscriptions - pub fn max_publish_requests(&self) -> usize { - // Allow for two requests per subscription - self.subscriptions.len() * 2 - } - - /// Places a new publish request onto the queue of publish requests. - /// - /// If the queue is full this call will pop the oldest and generate a service fault - /// for that before pushing the new one. - pub(crate) fn enqueue_publish_request( - &mut self, - now: &DateTimeUtc, - request_id: u32, - request: PublishRequest, - address_space: &AddressSpace, - ) -> Result<(), StatusCode> { - // Check if we have too requests waiting already - let max_publish_requests = self.max_publish_requests(); - if self.publish_request_queue.len() >= max_publish_requests { - // Tick to trigger publish, maybe remove a request to make space for new one - let _ = self.tick(now, address_space, TickReason::ReceivePublishRequest); - } - - // Enqueue request or return error - if self.publish_request_queue.len() >= max_publish_requests { - error!( - "Too many publish requests {} for capacity {}", - self.publish_request_queue.len(), - max_publish_requests - ); - Err(StatusCode::BadTooManyPublishRequests) - } else { - // Add to the front of the queue - older items are popped from the back - let results = self.process_subscription_acknowledgements(&request); - self.publish_request_queue.push_front(PublishRequestEntry { - request_id, - request, - results, - }); - // Tick to trigger publish - self.tick(now, address_space, TickReason::ReceivePublishRequest) - } - } - - /// Tests if there 
are no subscriptions/ - pub fn is_empty(&self) -> bool { - self.subscriptions.is_empty() - } - - /// Returns the length of subscriptions. - pub fn len(&self) -> usize { - self.subscriptions.len() - } - - /// Returns a reference to the collection holding the subscriptions. - pub fn subscriptions(&self) -> &BTreeMap { - &self.subscriptions - } - - /// Tests if the subscriptions contain the supplied subscription id. - pub fn contains(&self, subscription_id: u32) -> bool { - self.subscriptions.contains_key(&subscription_id) - } - - pub fn insert(&mut self, subscription_id: u32, subscription: Subscription) { - self.subscriptions.insert(subscription_id, subscription); - } - - pub fn remove(&mut self, subscription_id: u32) -> Option { - self.subscriptions.remove(&subscription_id) - } - - pub fn get_mut(&mut self, subscription_id: u32) -> Option<&mut Subscription> { - self.subscriptions.get_mut(&subscription_id) - } - - /// The tick causes the subscription manager to iterate through individual subscriptions calling tick - /// on each in order of priority. In each case this could generate data change notifications. Data change - /// notifications will be attached to the next available publish response and queued for sending - /// to the client. - pub(crate) fn tick( - &mut self, - now: &DateTimeUtc, - address_space: &AddressSpace, - tick_reason: TickReason, - ) -> Result<(), StatusCode> { - let subscription_ids = { - // Sort subscriptions by priority - let mut subscription_priority: Vec<(u32, u8)> = self - .subscriptions - .values() - .map(|v| (v.subscription_id(), v.priority())) - .collect(); - subscription_priority.sort_by(|s1, s2| s1.1.cmp(&s2.1)); - subscription_priority - .iter() - .map(|s| s.0) - .collect::>() - }; - - // Iterate through all subscriptions. If there is a publish request it will be used to - // acknowledge notifications and the response to return new notifications. 
- - // Now tick over the subscriptions - for subscription_id in subscription_ids { - let publishing_req_queued = !self.publish_request_queue.is_empty(); - let subscription = self.subscriptions.get_mut(&subscription_id).unwrap(); - - // Now tick the subscription to see if it has any notifications. If there are - // notifications then the publish response will be associated with his subscription - // and ready to go. - subscription.tick(now, address_space, tick_reason, publishing_req_queued); - - // Process any notifications - loop { - if !self.publish_request_queue.is_empty() { - if let Some(notification_message) = subscription.take_notification() { - let publish_request = self.publish_request_queue.pop_back().unwrap(); - // Consume the publish request and queue the notification onto the transmission queue - self.transmission_queue.push_front(( - subscription_id, - publish_request, - notification_message, - )); - } else { - break; - } - } else { - break; - } - } - - // Remove the subscription if it is done - if subscription.ready_to_remove() { - self.subscriptions.remove(&subscription_id); - } - } - - // Iterate through notifications from oldest to latest in the transmission making publish - // responses. 
- while !self.transmission_queue.is_empty() { - // Get the oldest notification to send - let (subscription_id, publish_request, notification_message) = - self.transmission_queue.pop_back().unwrap(); - - // Search the transmission queue for more notifications from this same subscription - let more_notifications = self.more_notifications(subscription_id); - - // Get a list of available sequence numbers - let available_sequence_numbers = self.available_sequence_numbers(subscription_id); - - // The notification to be sent is now put into the retransmission queue - self.retransmission_queue.insert( - (subscription_id, notification_message.sequence_number), - notification_message.clone(), - ); - - // Enqueue a publish response - let response = self.make_publish_response( - publish_request, - subscription_id, - now, - notification_message, - more_notifications, - available_sequence_numbers, - ); - self.publish_response_queue.push_back(response); - } - - // Clean up the retransmission queue - self.remove_old_unacknowledged_notifications(); - - Ok(()) - } - - /// Iterates through the existing queued publish requests and creates a timeout - /// publish response any that have expired. 
- pub fn expire_stale_publish_requests(&mut self, now: &DateTimeUtc) { - if self.publish_request_queue.is_empty() { - return; - } - - // Remove publish requests that have expired - let publish_request_timeout = self.publish_request_timeout; - - // Create timeout responses for each expired publish request - let mut expired_publish_responses = - VecDeque::with_capacity(self.publish_request_queue.len()); - - self.publish_request_queue.retain(|request| { - let request_header = &request.request.request_header; - let request_timestamp: DateTimeUtc = request_header.timestamp.into(); - let publish_request_timeout = Duration::from_millis(if request_header.timeout_hint > 0 && (request_header.timeout_hint as i64) < publish_request_timeout { - request_header.timeout_hint as u64 - } else { - publish_request_timeout as u64 - }); - // The request has timed out if the timestamp plus hint exceeds the input time - // TODO unwrap logic needs to change - let signed_duration_since: Duration = now.signed_duration_since(request_timestamp).to_std().unwrap(); - if signed_duration_since > publish_request_timeout { - debug!("Publish request {} has expired - timestamp = {:?}, expiration hint = {}, publish timeout = {:?}, time now = {:?}, ", request_header.request_handle, request_timestamp, request_timestamp, publish_request_timeout, now); - expired_publish_responses.push_front(PublishResponseEntry { - request_id: request.request_id, - response: ServiceFault { - response_header: ResponseHeader::new_timestamped_service_result(DateTime::now(), &request.request.request_header, StatusCode::BadTimeout), - }.into(), - }); - false - } else { - true - } - }); - // Queue responses for each expired request - self.publish_response_queue - .append(&mut expired_publish_responses); - } - - /// Deletes the acknowledged notifications, returning a list of status code for each according - /// to whether it was found or not. 
- /// - /// Good - deleted notification - /// BadSubscriptionIdInvalid - Subscription doesn't exist - /// BadSequenceNumberUnknown - Sequence number doesn't exist - /// - fn process_subscription_acknowledgements( - &mut self, - request: &PublishRequest, - ) -> Option> { - trace!("Processing subscription acknowledgements"); - if let Some(ref subscription_acknowledgements) = request.subscription_acknowledgements { - let results = subscription_acknowledgements.iter() - .map(|subscription_acknowledgement| { - let subscription_id = subscription_acknowledgement.subscription_id; - let sequence_number = subscription_acknowledgement.sequence_number; - // Check the subscription id exists - if self.subscriptions.contains_key(&subscription_id) { - // Clear notification by its sequence number - if self.retransmission_queue.remove(&(subscription_id, sequence_number)).is_some() { - trace!("Removing subscription {} sequence number {} from retransmission queue", subscription_id, sequence_number); - StatusCode::Good - } else { - error!("Cannot find acknowledged notification with sequence number {}", sequence_number); - StatusCode::BadSequenceNumberUnknown - } - } else { - error!("Cannot find acknowledged notification subscription id {}", subscription_id); - StatusCode::BadSubscriptionIdInvalid - } - }) - .collect(); - Some(results) - } else { - None - } - } - - /// Searches the transmission queue to see if there are more notifications for the specified - /// subscription id - fn more_notifications(&self, subscription_id: u32) -> bool { - // At least one match means more notifications - self.transmission_queue - .iter() - .any(|v| v.0 == subscription_id) - } - - /// Returns the array of available sequence numbers in the retransmission queue for the specified subscription - fn available_sequence_numbers(&self, subscription_id: u32) -> Option> { - if self.retransmission_queue.is_empty() { - None - } else { - // Find the notifications matching this subscription id in the retransmission 
queue - let sequence_numbers: Vec = self - .retransmission_queue - .iter() - .filter(|&(k, _)| k.0 == subscription_id) - .map(|(k, _)| k.1) - .collect(); - if sequence_numbers.is_empty() { - None - } else { - Some(sequence_numbers) - } - } - } - - fn make_publish_response( - &self, - publish_request: PublishRequestEntry, - subscription_id: u32, - now: &DateTimeUtc, - notification_message: NotificationMessage, - more_notifications: bool, - available_sequence_numbers: Option>, - ) -> PublishResponseEntry { - let now = DateTime::from(*now); - PublishResponseEntry { - request_id: publish_request.request_id, - response: PublishResponse { - response_header: ResponseHeader::new_timestamped_service_result( - now, - &publish_request.request.request_header, - StatusCode::Good, - ), - subscription_id, - available_sequence_numbers, - more_notifications, - notification_message, - results: publish_request.results, - diagnostic_infos: None, - } - .into(), - } - } - - /// Finds a notification message in the retransmission queue matching the supplied subscription id - /// and sequence number. Returns `BadSubscriptionIdInvalid` or `BadMessageNotAvailable` if a matching - /// notification is not found. 
- pub fn find_notification_message( - &self, - subscription_id: u32, - sequence_number: u32, - ) -> Result { - // Look for the subscription - if self.subscriptions.get(&subscription_id).is_some() { - // Look for the sequence number - if let Some(notification_message) = self - .retransmission_queue - .get(&(subscription_id, sequence_number)) - { - Ok((*notification_message).clone()) - } else { - Err(StatusCode::BadMessageNotAvailable) - } - } else { - Err(StatusCode::BadSubscriptionIdInvalid) - } - } - - fn remove_notifications(&mut self, sequence_nrs_to_remove: &[(u32, u32)]) { - sequence_nrs_to_remove.iter().for_each(|n| { - trace!( - "Removing notification for subscription {}, sequence nr {}", - n.0, - n.1 - ); - let _ = self.retransmission_queue.remove(n); - }); - } - - /// Purges notifications waiting for acknowledgement if they are stale or the max permissible - /// is exceeded. - fn remove_old_unacknowledged_notifications(&mut self) { - // Strip out notifications for subscriptions that no longer exist - let sequence_nrs_to_remove = self - .retransmission_queue - .iter() - .filter(|(k, _)| !self.subscriptions.contains_key(&k.0)) - .map(|(k, _)| *k) - .collect::>(); - self.remove_notifications(&sequence_nrs_to_remove); - - // Compare number of items in retransmission queue to max permissible and remove the older - // notifications. 
- let max_retransmission_queue = self.max_publish_requests() * 2; - if self.retransmission_queue.len() > max_retransmission_queue { - let remove_count = self.retransmission_queue.len() - max_retransmission_queue; - let sequence_nrs_to_remove = self - .retransmission_queue - .iter() - .take(remove_count) - .map(|(k, _)| *k) - .collect::>(); - self.remove_notifications(&sequence_nrs_to_remove); - } - } -} diff --git a/lib/src/server/tests/address_space.rs b/lib/src/server/tests/address_space.rs deleted file mode 100644 index 3d1d576ac..000000000 --- a/lib/src/server/tests/address_space.rs +++ /dev/null @@ -1,954 +0,0 @@ -use std::sync::Arc; - -use crate::server::{ - address_space::{ - references::Reference, - relative_path::{find_node_from_browse_path, find_nodes_relative_path_simple}, - EventNotifier, - }, - callbacks, - prelude::*, - tests::*, -}; - -#[test] -fn address_space() { - let address_space = AddressSpace::new(); - - let root_folder = address_space.root_folder(); - assert_eq!(root_folder.node_class(), NodeClass::Object); - let objects_folder = address_space.objects_folder(); - assert_eq!(objects_folder.node_class(), NodeClass::Object); - let types_folder = address_space.types_folder(); - assert_eq!(types_folder.node_class(), NodeClass::Object); - let views_folder = address_space.views_folder(); - assert_eq!(views_folder.node_class(), NodeClass::Object); -} - -#[test] -fn namespaces() { - // Test that namespaces are listed properly - let mut address_space = AddressSpace::new(); - - let ns = address_space.register_namespace("urn:test").unwrap(); - - assert_eq!( - address_space - .namespace_index("http://opcfoundation.org/UA/") - .unwrap(), - 0u16 - ); - assert_eq!(address_space.namespace_index("urn:test").unwrap(), ns); - // Error - assert_eq!(address_space.register_namespace(""), Err(())); - // Add new namespaces - assert_eq!(address_space.register_namespace("foo").unwrap(), 2u16); - assert_eq!(address_space.register_namespace("bar").unwrap(), 3u16); - // 
Test if existing namespace is found - assert_eq!(address_space.register_namespace("foo").unwrap(), 2u16); -} - -#[test] -fn find_root_folder() { - let address_space = AddressSpace::new(); - let node_type = address_space.find_node(&NodeId::new(0, 84)); - assert!(node_type.is_some()); - - let node = node_type.unwrap().as_node(); - assert_eq!(node.node_id(), NodeId::new(0, 84)); - assert_eq!(node.node_id(), ObjectId::RootFolder.into()); -} - -#[test] -fn find_objects_folder() { - let address_space = AddressSpace::new(); - let node_type = address_space.find(ObjectId::ObjectsFolder); - assert!(node_type.is_some()); -} - -#[test] -fn find_types_folder() { - let address_space = AddressSpace::new(); - let node_type = address_space.find(ObjectId::TypesFolder); - assert!(node_type.is_some()); -} - -#[test] -fn find_views_folder() { - let address_space = AddressSpace::new(); - let node_type = address_space.find(ObjectId::ViewsFolder); - assert!(node_type.is_some()); -} - -#[test] -fn find_common_nodes() { - let address_space = AddressSpace::new(); - let nodes: Vec = vec![ - ObjectId::RootFolder.into(), - ObjectId::ObjectsFolder.into(), - ObjectId::TypesFolder.into(), - ObjectId::ViewsFolder.into(), - ObjectId::DataTypesFolder.into(), - DataTypeId::BaseDataType.into(), - // Types - DataTypeId::Boolean.into(), - DataTypeId::ByteString.into(), - DataTypeId::DataValue.into(), - DataTypeId::DateTime.into(), - DataTypeId::DiagnosticInfo.into(), - DataTypeId::Enumeration.into(), - DataTypeId::ExpandedNodeId.into(), - DataTypeId::Guid.into(), - DataTypeId::LocalizedText.into(), - DataTypeId::NodeId.into(), - DataTypeId::Number.into(), - DataTypeId::QualifiedName.into(), - DataTypeId::StatusCode.into(), - DataTypeId::String.into(), - DataTypeId::Structure.into(), - DataTypeId::XmlElement.into(), - DataTypeId::Double.into(), - DataTypeId::Float.into(), - DataTypeId::Integer.into(), - DataTypeId::SByte.into(), - DataTypeId::Int16.into(), - DataTypeId::Int32.into(), - 
DataTypeId::Int64.into(), - DataTypeId::Byte.into(), - DataTypeId::UInt16.into(), - DataTypeId::UInt32.into(), - DataTypeId::UInt64.into(), - ObjectId::OPCBinarySchema_TypeSystem.into(), - ObjectTypeId::DataTypeSystemType.into(), - // Refs - ObjectId::ReferenceTypesFolder.into(), - ReferenceTypeId::References.into(), - ReferenceTypeId::HierarchicalReferences.into(), - ReferenceTypeId::HasChild.into(), - ReferenceTypeId::HasSubtype.into(), - ReferenceTypeId::Organizes.into(), - ReferenceTypeId::NonHierarchicalReferences.into(), - ReferenceTypeId::HasTypeDefinition.into(), - ]; - for n in nodes { - assert!(address_space.find_node(&n).is_some()); - } -} - -#[test] -fn object_attributes() { - let on = NodeId::new(1, "o1"); - let o = Object::new(&on, "Browse01", "Display01", EventNotifier::empty()); - assert_eq!(o.node_class(), NodeClass::Object); - assert_eq!(o.node_id(), on); - assert_eq!(o.browse_name(), QualifiedName::new(0, "Browse01")); - assert_eq!(o.display_name(), LocalizedText::new("", "Display01")); -} - -#[test] -fn find_node_by_id() { - let address_space = make_sample_address_space(); - let mut address_space = trace_write_lock!(address_space); - let ns = address_space.register_namespace("urn:test").unwrap(); - - assert!(!address_space.node_exists(&NodeId::null())); - assert!(!address_space.node_exists(&NodeId::new(11, "v3"))); - - assert!(address_space.node_exists(&NodeId::new(ns, "v1"))); - assert!(address_space.node_exists(&NodeId::new(ns, 300))); - assert!(address_space.node_exists(&NodeId::new(ns, "v3"))); -} - -fn dump_references(references: &Vec) { - for r in references { - println!( - "Referencs - type = {:?}, to = {:?}", - r.reference_type, r.target_node - ); - } -} - -#[test] -fn find_references_by_direction() { - let address_space = make_sample_address_space(); - let address_space = trace_read_lock!(address_space); - - let (references, _inverse_ref_idx) = address_space - .find_references_by_direction::( - &NodeId::objects_folder_id(), - 
BrowseDirection::Forward, - None, - ); - dump_references(&references); - assert_eq!(references.len(), 3); - - // Should be same as filtering on None - let reference_filter = Some((ReferenceTypeId::References, true)); - let (references, _inverse_ref_idx) = address_space.find_references_by_direction( - &NodeId::objects_folder_id(), - BrowseDirection::Forward, - reference_filter, - ); - dump_references(&references); - assert_eq!(references.len(), 3); - - // Only organizes - let reference_filter = Some((ReferenceTypeId::Organizes, false)); - let (references, _inverse_ref_idx) = address_space.find_references_by_direction( - &NodeId::objects_folder_id(), - BrowseDirection::Forward, - reference_filter, - ); - dump_references(&references); - assert_eq!(references.len(), 2); - - // Reverse organises should == 1 (root organises objects) - let (references, _inverse_ref_idx) = address_space.find_references_by_direction( - &NodeId::objects_folder_id(), - BrowseDirection::Inverse, - reference_filter, - ); - dump_references(&references); - assert_eq!(references.len(), 1); - - // Both directions - let (references, inverse_ref_idx) = address_space.find_references_by_direction( - &NodeId::objects_folder_id(), - BrowseDirection::Both, - reference_filter, - ); - dump_references(&references); - assert_eq!(references.len(), 3); - assert_eq!(inverse_ref_idx, 2); -} - -#[test] -fn find_references() { - let address_space = make_sample_address_space(); - let address_space = trace_read_lock!(address_space); - - let references = address_space.find_references( - &NodeId::root_folder_id(), - Some((ReferenceTypeId::Organizes, false)), - ); - assert!(references.is_some()); - let references = references.as_ref().unwrap(); - dump_references(&references); - assert_eq!(references.len(), 3); - - let references = - address_space.find_references::(&NodeId::root_folder_id(), None); - assert!(references.is_some()); - let references = references.as_ref().unwrap(); - dump_references(&references); - 
assert_eq!(references.len(), 4); - - let references = address_space.find_references( - &NodeId::objects_folder_id(), - Some((ReferenceTypeId::Organizes, false)), - ); - assert!(references.is_some()); - let references = references.unwrap(); - dump_references(&references); - assert_eq!(references.len(), 2); - - let r1 = &references[0]; - assert_eq!(r1.reference_type, ReferenceTypeId::Organizes.into()); - let child_node_id = r1.target_node.clone(); - - let child = address_space.find_node(&child_node_id); - assert!(child.is_some()); -} - -#[test] -fn find_inverse_references() { - let address_space = make_sample_address_space(); - let address_space = trace_read_lock!(address_space); - - //println!("{:#?}", address_space); - let references = address_space.find_inverse_references( - &NodeId::root_folder_id(), - Some((ReferenceTypeId::Organizes, false)), - ); - assert!(references.is_none()); - - let references = address_space.find_inverse_references( - &NodeId::objects_folder_id(), - Some((ReferenceTypeId::Organizes, false)), - ); - assert!(references.is_some()); - let references = references.unwrap(); - assert_eq!(references.len(), 1); -} - -#[test] -fn find_reference_subtypes() { - let address_space = make_sample_address_space(); - let address_space = trace_read_lock!(address_space); - - let references = address_space.references(); - let reference_types = vec![ - ( - ReferenceTypeId::References, - ReferenceTypeId::HierarchicalReferences, - ), - (ReferenceTypeId::References, ReferenceTypeId::HasChild), - (ReferenceTypeId::References, ReferenceTypeId::HasSubtype), - (ReferenceTypeId::References, ReferenceTypeId::Organizes), - (ReferenceTypeId::References, ReferenceTypeId::Aggregates), - (ReferenceTypeId::References, ReferenceTypeId::HasProperty), - (ReferenceTypeId::References, ReferenceTypeId::HasComponent), - ( - ReferenceTypeId::References, - ReferenceTypeId::HasOrderedComponent, - ), - (ReferenceTypeId::References, ReferenceTypeId::HasEventSource), - 
(ReferenceTypeId::References, ReferenceTypeId::HasNotifier), - (ReferenceTypeId::References, ReferenceTypeId::GeneratesEvent), - ( - ReferenceTypeId::References, - ReferenceTypeId::AlwaysGeneratesEvent, - ), - (ReferenceTypeId::References, ReferenceTypeId::HasEncoding), - ( - ReferenceTypeId::References, - ReferenceTypeId::HasModellingRule, - ), - (ReferenceTypeId::References, ReferenceTypeId::HasDescription), - ( - ReferenceTypeId::References, - ReferenceTypeId::HasTypeDefinition, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasChild, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasSubtype, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::Organizes, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::Aggregates, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasProperty, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasComponent, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasOrderedComponent, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasEventSource, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasNotifier, - ), - (ReferenceTypeId::HasChild, ReferenceTypeId::Aggregates), - (ReferenceTypeId::HasChild, ReferenceTypeId::HasComponent), - ( - ReferenceTypeId::HasChild, - ReferenceTypeId::HasHistoricalConfiguration, - ), - (ReferenceTypeId::HasChild, ReferenceTypeId::HasProperty), - ( - ReferenceTypeId::HasChild, - ReferenceTypeId::HasOrderedComponent, - ), - (ReferenceTypeId::HasChild, ReferenceTypeId::HasSubtype), - (ReferenceTypeId::Aggregates, ReferenceTypeId::HasComponent), - ( - ReferenceTypeId::Aggregates, - ReferenceTypeId::HasHistoricalConfiguration, - ), - (ReferenceTypeId::Aggregates, ReferenceTypeId::HasProperty), - ( - ReferenceTypeId::Aggregates, - ReferenceTypeId::HasOrderedComponent, - ), - ( - ReferenceTypeId::HasComponent, - 
ReferenceTypeId::HasOrderedComponent, - ), - ( - ReferenceTypeId::HasEventSource, - ReferenceTypeId::HasNotifier, - ), - ( - ReferenceTypeId::HierarchicalReferences, - ReferenceTypeId::HasNotifier, - ), - ( - ReferenceTypeId::References, - ReferenceTypeId::NonHierarchicalReferences, - ), - ( - ReferenceTypeId::NonHierarchicalReferences, - ReferenceTypeId::GeneratesEvent, - ), - ( - ReferenceTypeId::NonHierarchicalReferences, - ReferenceTypeId::AlwaysGeneratesEvent, - ), - ( - ReferenceTypeId::NonHierarchicalReferences, - ReferenceTypeId::HasEncoding, - ), - ( - ReferenceTypeId::NonHierarchicalReferences, - ReferenceTypeId::HasModellingRule, - ), - ( - ReferenceTypeId::NonHierarchicalReferences, - ReferenceTypeId::HasDescription, - ), - ( - ReferenceTypeId::NonHierarchicalReferences, - ReferenceTypeId::HasTypeDefinition, - ), - ( - ReferenceTypeId::GeneratesEvent, - ReferenceTypeId::AlwaysGeneratesEvent, - ), - ]; - - // A type should always match itself - assert!(references.reference_type_matches( - &ReferenceTypeId::NonHierarchicalReferences.into(), - &ReferenceTypeId::NonHierarchicalReferences.into(), - true - )); - assert!(references.reference_type_matches( - &ReferenceTypeId::NonHierarchicalReferences.into(), - &ReferenceTypeId::NonHierarchicalReferences.into(), - false - )); - - // Make sure that subtypes match when subtypes are to be compared and doesn't when they should - // not be compared. - reference_types.iter().for_each(|r| { - let r1 = r.0.into(); - let r2 = r.1.into(); - assert!(references.reference_type_matches(&r1, &r2, true)); - assert!(!references.reference_type_matches(&r1, &r2, false)); - }); -} - -/// This test is to ensure that adding a Variable with a value of Array to address space sets the -/// ValueRank and ArrayDimensions attributes correctly. 
-#[test] -fn array_as_variable() { - // 1 dimensional array with 100 element - let values = (0..100) - .map(|i| Variant::Int32(i)) - .collect::>(); - - // Get the variable node back from the address space, ensure that the ValueRank and ArrayDimensions are correct - let node_id = NodeId::new(2, 1); - let v = Variable::new(&node_id, "x", "x", (VariantTypeId::Int32, values)); - - let value_rank = v.value_rank(); - assert_eq!(value_rank, 1); - let array_dimensions = v.array_dimensions().unwrap(); - assert_eq!(array_dimensions, vec![100u32]); -} - -/// This test is to ensure that adding a Variable with a value of Array to address space sets the -/// ValueRank and ArrayDimensions attributes correctly. -#[test] -fn multi_dimension_array_as_variable() { - // 2 dimensional array with 10x10 elements - - let values = (0..100) - .map(|i| Variant::Int32(i)) - .collect::>(); - let mda = Array::new_multi(VariantTypeId::Int32, values, vec![10u32, 10u32]).unwrap(); - assert!(mda.is_valid()); - - // Get the variable node back from the address space, ensure that the ValueRank and ArrayDimensions are correct - let node_id = NodeId::new(2, 1); - let v = Variable::new(&node_id, "x", "x", mda); - - let value_rank = v.value_rank(); - assert_eq!(value_rank, 2); - let array_dimensions = v.array_dimensions().unwrap(); - assert_eq!(array_dimensions, vec![10u32, 10u32]); -} - -#[test] -fn browse_nodes() { - let address_space = make_sample_address_space(); - let address_space = trace_read_lock!(address_space); - - // Test that a node can be found - let object_id = ObjectId::RootFolder.into(); - let result = find_node_from_browse_path( - &address_space, - &object_id, - &vec!["Objects".into(), "Sample".into(), "v1".into()], - ); - let node = result.unwrap(); - assert_eq!(node.as_node().browse_name(), QualifiedName::from("v1")); - - // Test that a non existent node cannot be found - let result = find_node_from_browse_path( - &address_space, - &object_id, - &vec!["Objects".into(), "Sample".into(), 
"vxxx".into()], - ); - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), StatusCode::BadNotFound); -} - -#[test] -fn find_nodes_relative_path() { - let address_space = make_sample_address_space(); - let address_space = trace_read_lock!(address_space); - - // Given some paths, find the nodes - let parent_node = ObjectId::RootFolder.into(); - - let relative_path = "/Objects/Server.ServerStatus.BuildInfo.ProductName"; - - let results = - find_nodes_relative_path_simple(&address_space, &parent_node, relative_path).unwrap(); - assert_eq!(results.len(), 1); - assert_eq!( - results[0], - VariableId::Server_ServerStatus_BuildInfo_ProductName.into() - ); -} - -#[test] -fn object_builder() { - let mut address_space = AddressSpace::new(); - - let node_type_id = NodeId::new(1, "HelloType"); - let _ot = ObjectTypeBuilder::new(&node_type_id, "HelloType", "HelloType") - .subtype_of(ObjectTypeId::BaseObjectType) - .insert(&mut address_space); - - let node_id = NodeId::new(1, "Hello"); - let _o = ObjectBuilder::new(&node_id, "Foo", "Foo") - .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) - .organized_by(ObjectId::ObjectsFolder) - .has_type_definition(node_type_id.clone()) - .insert(&mut address_space); - - // Verify the variable is there - let _o = match address_space.find_node(&node_id).unwrap() { - NodeType::Object(o) => o, - _ => panic!(), - }; - - // Verify the reference to the objects folder is there - assert!(address_space.has_reference( - &ObjectId::ObjectsFolder.into(), - &node_id, - ReferenceTypeId::Organizes - )); - assert!(address_space.has_reference( - &node_id, - &node_type_id, - ReferenceTypeId::HasTypeDefinition - )); -} - -#[test] -fn object_type_builder() { - let mut address_space = AddressSpace::new(); - - let node_type_id = NodeId::new(1, "HelloType"); - let _ot = ObjectTypeBuilder::new(&node_type_id, "HelloType", "HelloType") - .subtype_of(ObjectTypeId::BaseObjectType) - .insert(&mut address_space); - - let _ot = match 
address_space.find_node(&node_type_id).unwrap() { - NodeType::ObjectType(ot) => ot, - _ => panic!(), - }; - - assert!(address_space.has_reference( - &ObjectTypeId::BaseObjectType.into(), - &node_type_id, - ReferenceTypeId::HasSubtype - )); -} - -#[test] -fn variable_builder() { - let result = std::panic::catch_unwind(|| { - // This should panic - let _v = VariableBuilder::new(&NodeId::null(), "", "").build(); - }); - assert!(result.is_err()); - - // This should build - let _v = VariableBuilder::new(&NodeId::new(1, 1), "", "") - .data_type(DataTypeId::Boolean) - .build(); - - // Check a variable with a bunch of fields set - let v = VariableBuilder::new(&NodeId::new(1, "Hello"), "BrowseName", "DisplayName") - .description("Desc") - .data_type(DataTypeId::UInt32) - .value_rank(10) - .array_dimensions(&[1, 2, 3]) - .historizing(true) - .value(Variant::from(999)) - .minimum_sampling_interval(123.0) - .build(); - - assert_eq!(v.node_id(), NodeId::new(1, "Hello")); - assert_eq!(v.browse_name(), QualifiedName::new(0, "BrowseName")); - assert_eq!(v.display_name(), LocalizedText::new("", "DisplayName")); - assert_eq!(v.data_type(), DataTypeId::UInt32.into()); - assert_eq!(v.description().unwrap(), LocalizedText::new("", "Desc")); - assert_eq!(v.value_rank(), 10); - assert_eq!(v.array_dimensions().unwrap(), vec![1, 2, 3]); - assert_eq!(v.historizing(), true); - assert_eq!( - v.value( - TimestampsToReturn::Neither, - NumericRange::None, - &QualifiedName::null(), - 0.0 - ) - .value - .unwrap(), - Variant::from(999) - ); - assert_eq!(v.minimum_sampling_interval().unwrap(), 123.0); - - // Add a variable to the address space - - let mut address_space = AddressSpace::new(); - let node_id = NodeId::new(1, "Hello"); - let _v = VariableBuilder::new(&node_id, "BrowseName", "DisplayName") - .description("Desc") - .value_rank(10) - .data_type(DataTypeId::UInt32) - .array_dimensions(&[1, 2, 3]) - .historizing(true) - .value(Variant::from(999)) - .minimum_sampling_interval(123.0) - 
.organized_by(ObjectId::ObjectsFolder) - .insert(&mut address_space); - - // Verify the variable is there - assert!(address_space.find_variable_by_ref(&node_id).is_some()); - // Verify the reference to the objects folder is there - assert!(address_space.has_reference( - &ObjectId::ObjectsFolder.into(), - &node_id, - ReferenceTypeId::Organizes - )); -} - -#[test] -fn method_builder() { - let mut address_space = AddressSpace::new(); - - let ns = address_space.register_namespace("urn:test").unwrap(); - - let object_id: NodeId = ObjectId::ObjectsFolder.into(); - - let fn_node_id = NodeId::new(ns, "HelloWorld"); - - let inserted = MethodBuilder::new(&fn_node_id, "HelloWorld", "HelloWorld") - .component_of(object_id.clone()) - .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) - .callback(Box::new(HelloWorld)) - .insert(&mut address_space); - assert!(inserted); - - let method = match address_space.find_node(&fn_node_id).unwrap() { - NodeType::Method(m) => m, - _ => panic!(), - }; - - assert!(method.has_callback()); - - let refs = address_space - .find_references(&fn_node_id, Some((ReferenceTypeId::HasProperty, false))) - .unwrap(); - assert_eq!(refs.len(), 1); - - let child = address_space - .find_node(&refs.get(0).unwrap().target_node) - .unwrap(); - if let NodeType::Variable(v) = child { - // verify OutputArguments - // verify OutputArguments / Argument value - assert_eq!(v.data_type(), DataTypeId::Argument.into()); - assert_eq!(v.display_name(), LocalizedText::from("OutputArguments")); - let v = v - .value( - TimestampsToReturn::Neither, - NumericRange::None, - &QualifiedName::null(), - 0.0, - ) - .value - .unwrap(); - if let Variant::Array(array) = v { - let v = array.values; - assert_eq!(v.len(), 1); - let v = v.get(0).unwrap().clone(); - if let Variant::ExtensionObject(v) = v { - // deserialize the Argument here - let decoding_options = DecodingOptions::test(); - let argument = v.decode_inner::(&decoding_options).unwrap(); - 
assert_eq!(argument.name, UAString::from("Result")); - assert_eq!(argument.data_type, DataTypeId::String.into()); - assert_eq!(argument.value_rank, -1); - assert_eq!(argument.array_dimensions, None); - assert_eq!(argument.description, LocalizedText::null()); - } else { - panic!("Variant was expected to be extension object, was {:?}", v); - } - } else { - panic!("Variant was expected to be array, was {:?}", v); - } - } else { - panic!(); - } -} - -struct HelloWorld; - -impl callbacks::Method for HelloWorld { - fn call( - &mut self, - _session_id: &NodeId, - _session_map: Arc>, - _request: &CallMethodRequest, - ) -> Result { - Ok(CallMethodResult { - status_code: StatusCode::Good, - input_argument_results: Some(vec![StatusCode::Good]), - input_argument_diagnostic_infos: None, - output_arguments: Some(vec![Variant::from("Hello World!")]), - }) - } -} - -#[test] -fn simple_delete_node() { - crate::console_logging::init(); - - // This is a super basic, debuggable delete test. There is a single Root node, and a - // child object. After deleting the child, only the Root should exist with no references at - // all to the child. - - // A blank address space, with nothing at all in it - let mut address_space = AddressSpace::default(); - - // Add a root node - let root_node = NodeId::root_folder_id(); - - let node = Object::new(&root_node, "Root", "", EventNotifier::empty()); - let _ = address_space.insert::(node, None); - - let node_id = NodeId::new(1, "Hello"); - let _o = ObjectBuilder::new(&node_id, "Foo", "Foo") - .organized_by(root_node.clone()) - .insert(&mut address_space); - - // Verify the object and refs are there - assert!(address_space.find_node(&node_id).is_some()); - assert!(address_space.has_reference(&root_node, &node_id, ReferenceTypeId::Organizes)); - - // Try one time deleting references, the other time not deleting them. 
- address_space.delete(&node_id, true); - // Delete the node and the refs - assert!(address_space.find_node(&node_id).is_none()); - assert!(address_space.find_node(&root_node).is_some()); - assert!(!address_space.has_reference(&root_node, &node_id, ReferenceTypeId::Organizes)); - assert!(!address_space - .references() - .reference_to_node_exists(&node_id)); -} - -#[test] -fn delete_node() { - crate::console_logging::init(); - - // Try creating and deleting a node, verifying that it's totally gone afterwards - (0..2).for_each(|i| { - let mut address_space = AddressSpace::new(); - - let node_type_id = NodeId::new(1, "HelloType"); - let _ot = ObjectTypeBuilder::new(&node_type_id, "HelloType", "HelloType") - .subtype_of(ObjectTypeId::BaseObjectType) - .insert(&mut address_space); - - let node_id = NodeId::new(1, "Hello"); - let _o = ObjectBuilder::new(&node_id, "Foo", "Foo") - .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) - .organized_by(ObjectId::ObjectsFolder) - .has_type_definition(node_type_id.clone()) - .insert(&mut address_space); - - // Verify the object and refs are there - assert!(address_space.find_node(&node_id).is_some()); - assert!(address_space.has_reference( - &ObjectId::ObjectsFolder.into(), - &node_id, - ReferenceTypeId::Organizes - )); - assert!(!address_space.has_reference( - &node_id, - &ObjectId::ObjectsFolder.into(), - ReferenceTypeId::Organizes - )); - assert!(address_space.has_reference( - &node_id, - &node_type_id, - ReferenceTypeId::HasTypeDefinition - )); - - // Try one time deleting references, the other time not deleting them. 
- let delete_references = i == 1; - address_space.delete(&node_id, delete_references); - if !delete_references { - // Deleted the node but not refs - assert!(address_space.find_node(&node_id).is_none()); - assert!(address_space.has_reference( - &ObjectId::ObjectsFolder.into(), - &node_id, - ReferenceTypeId::Organizes - )); - assert!(address_space.has_reference( - &node_id, - &node_type_id, - ReferenceTypeId::HasTypeDefinition - )); - } else { - // Delete the node and the refs - assert!(address_space.find_node(&node_id).is_none()); - assert!(!address_space.has_reference( - &ObjectId::ObjectsFolder.into(), - &node_id, - ReferenceTypeId::Organizes - )); - assert!(!address_space.has_reference( - &node_id, - &node_type_id, - ReferenceTypeId::HasTypeDefinition - )); - assert!(!address_space - .references() - .reference_to_node_exists(&node_id)); - } - }); -} - -#[test] -fn is_subtype() { - let address_space = AddressSpace::new(); - // Test subtypes against other and the expected result - let subtypes = [ - // Positive - ( - ObjectTypeId::BaseEventType, - ObjectTypeId::BaseEventType, - true, - ), - ( - ObjectTypeId::AuditEventType, - ObjectTypeId::BaseEventType, - true, - ), - ( - ObjectTypeId::BaseModelChangeEventType, - ObjectTypeId::BaseEventType, - true, - ), - ( - ObjectTypeId::AuditHistoryUpdateEventType, - ObjectTypeId::BaseEventType, - true, - ), - ( - ObjectTypeId::AuditUrlMismatchEventType, - ObjectTypeId::AuditSessionEventType, - true, - ), - // Negative - // BaseEventType is not a subtype of AuditEventType - ( - ObjectTypeId::BaseEventType, - ObjectTypeId::AuditEventType, - false, - ), - // DeviceFailureEventType is not a subtype of ProgressEventType (different branches) - ( - ObjectTypeId::DeviceFailureEventType, - ObjectTypeId::ProgressEventType, - false, - ), - // SystemEventType is not a subtype of ProgressEventType (peers) - ( - ObjectTypeId::SystemEventType, - ObjectTypeId::ProgressEventType, - false, - ), - ]; - subtypes.iter().for_each(|v| { - 
println!( - "Expecting {:?} to be a subtype of {:?} == {:?}", - v.0, v.1, v.2 - ); - assert_eq!(address_space.is_subtype(&v.0.into(), &v.1.into()), v.2); - }); -} - -#[test] -fn hierarchical_references() { - let address_space = AddressSpace::new(); - - // Try with root - let refs = address_space - .find_hierarchical_references(&NodeId::root_folder_id()) - .unwrap(); - assert_eq!(refs.len(), 3); - assert!(refs.contains(&NodeId::objects_folder_id())); - assert!(refs.contains(&NodeId::views_folder_id())); - assert!(refs.contains(&NodeId::types_folder_id())); - - // Try with an object that has some properties - let node = ObjectId::Server_ServerCapabilities.into(); - let refs = address_space.find_hierarchical_references(&node).unwrap(); - println!("{:#?}", refs); - assert_eq!(refs.len(), 15); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_ServerProfileArray.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_LocaleIdArray.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MinSupportedSampleRate.into())); - assert!( - refs.contains(&VariableId::Server_ServerCapabilities_MaxBrowseContinuationPoints.into()) - ); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxQueryContinuationPoints.into())); - assert!( - refs.contains(&VariableId::Server_ServerCapabilities_MaxHistoryContinuationPoints.into()) - ); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_SoftwareCertificates.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxArrayLength.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxStringLength.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxByteStringLength.into())); - assert!(refs.contains(&ObjectId::Server_ServerCapabilities_OperationLimits.into())); - assert!(refs.contains(&ObjectId::Server_ServerCapabilities_ModellingRules.into())); - 
assert!(refs.contains(&ObjectId::Server_ServerCapabilities_AggregateFunctions.into())); - assert!(refs.contains(&ObjectId::HistoryServerCapabilities.into())); -} diff --git a/lib/src/server/tests/events.rs b/lib/src/server/tests/events.rs deleted file mode 100644 index 3e1ad6cfb..000000000 --- a/lib/src/server/tests/events.rs +++ /dev/null @@ -1,865 +0,0 @@ -use std::collections::HashSet; - -use crate::types::{ - node_ids::ReferenceTypeId, - operand::{ContentFilterBuilder, Operand}, - service_types::ContentFilterElement, - AttributeId, DataTypeId, LocalizedText, NodeId, ObjectId, ObjectTypeId, QualifiedName, - UAString, VariableTypeId, Variant, -}; - -use crate::server::{ - address_space::{object_type::ObjectTypeBuilder, variable::VariableBuilder, AddressSpace}, - events::event::{BaseEventType, Event}, - events::event_filter, - events::operator, - tests::*, -}; - -fn event_id() -> NodeId { - NodeId::new(2, 1000) -} - -pub struct TestEventType { - base: BaseEventType, - foo: i32, -} - -impl Event for TestEventType { - type Err = (); - - fn is_valid(&self) -> bool { - self.base.is_valid() - } - - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - match self.base.raise(address_space) { - Ok(node_id) => { - let property_id = NodeId::next_numeric(2); - self.add_property( - &node_id, - property_id, - "Foo", - "Foo", - DataTypeId::Int32, - self.foo, - address_space, - ); - Ok(node_id) - } - err => err, - } - } -} - -base_event_impl!(TestEventType, base); - -impl TestEventType { - fn new( - node_id: R, - browse_name: S, - display_name: T, - parent_node: U, - source_node: V, - foo: i32, - ) -> Self - where - R: Into, - S: Into, - T: Into, - U: Into, - V: Into, - { - let event_type_id = Self::event_type_id(); - let source_node: NodeId = source_node.into(); - Self { - base: BaseEventType::new_now( - node_id, - event_type_id, - browse_name, - display_name, - parent_node, - ) - .source_node(source_node.clone()) - .message(LocalizedText::from(format!( - "A Test 
event from {:?}", - source_node - ))), - foo, - } - } - - fn event_type_id() -> NodeId { - NodeId::new(2, "TestEventType") - } -} - -fn create_event( - address_space: &mut AddressSpace, - node_id: NodeId, - source_machine_id: &NodeId, - foo: i32, -) { - let event_name = format!("Event{}", foo); - let mut event = TestEventType::new( - &node_id, - event_name.clone(), - event_name, - NodeId::objects_folder_id(), - source_machine_id, - foo, - ); - let _ = event.raise(address_space); -} - -fn address_space() -> AddressSpace { - let mut address_space = AddressSpace::new(); - - let ns = address_space.register_namespace("urn:test").unwrap(); - - // Create an event type - let event_type_id = TestEventType::event_type_id(); - ObjectTypeBuilder::new(&event_type_id, "TestEventType", "TestEventType") - .is_abstract(false) - .subtype_of(ObjectTypeId::BaseEventType) - .insert(&mut address_space); - - // Add attribute to event type - let attr_foo_id = NodeId::new(ns, "Foo"); - VariableBuilder::new(&attr_foo_id, "Foo", "Foo") - .property_of(event_type_id.clone()) - .data_type(DataTypeId::UInt32) - .has_type_definition(VariableTypeId::PropertyType) - .has_modelling_rule(ObjectId::ModellingRule_Mandatory) - .insert(&mut address_space); - - // Create an event of that type - create_event( - &mut address_space, - event_id(), - &ObjectId::Server.into(), - 100, - ); - - address_space -} - -fn do_operator_test(f: T) -where - T: FnOnce(&AddressSpace, &NodeId, &mut HashSet, &Vec), -{ - crate::console_logging::init(); - - let mut used_elements = HashSet::new(); - let elements = vec![]; - let address_space = address_space(); - - // use object_id of a generated event - let object_id = event_id(); - - f(&address_space, &object_id, &mut used_elements, &elements); -} - -#[test] -fn test_eq() { - do_operator_test(|address_space, object_id, used_elements, elements| { - // Simple test, compare two values of the same kind - let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = 
operator::eq( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::eq( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(10), Operand::literal(11)]; - let result = operator::eq( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - }); -} - -#[test] -fn test_lt() { - do_operator_test(|address_space, object_id, used_elements, elements| { - // Simple test, compare two values of the same kind - let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::lt( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::lt( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(11), Operand::literal(10)]; - let result = operator::lt( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - }); -} - -#[test] -fn test_lte() { - do_operator_test(|address_space, object_id, used_elements, elements| { - // Simple test, compare two values of the same kind - let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::lte( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::lte( - &object_id, 
- &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(11), Operand::literal(10)]; - let result = operator::lte( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - }); -} - -#[test] -fn test_gt() { - do_operator_test(|address_space, object_id, used_elements, elements| { - // Simple test, compare two values of the same kind - let operands = [Operand::literal(11), Operand::literal(10)]; - let result = operator::gt( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::gt( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::gt( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - }); -} - -#[test] -fn test_gte() { - do_operator_test(|address_space, object_id, used_elements, elements| { - // Simple test, compare two values of the same kind - let operands = &[Operand::literal(11), Operand::literal(10)]; - let result = operator::gte( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::gte( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::gte( - &object_id, - &operands[..], - 
used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - }); -} - -#[test] -fn test_not() { - do_operator_test(|address_space, object_id, used_elements, elements| { - let operands = &[Operand::literal(false)]; - let result = operator::not( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(true)]; - let result = operator::not( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - // String - let operands = &[Operand::literal("0")]; - let result = operator::not( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - // String(2) - let operands = &[Operand::literal("true")]; - let result = operator::not( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - // Invalid - Double - let operands = &[Operand::literal(99.9)]; - let result = operator::not( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - - // Invalid - Int32 - let operands = &[Operand::literal(1)]; - let result = operator::not( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - }); -} - -#[test] -fn test_between() { - do_operator_test(|address_space, object_id, used_elements, elements| { - // Test operator with some ranges and mix of types with implicit conversion - let operands = &[ - Operand::literal(12), - Operand::literal(12), - Operand::literal(13), - ]; - let result = operator::between( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - 
assert_eq!(result, Variant::Boolean(true)); - - let operands = &[ - Operand::literal(13), - Operand::literal(12), - Operand::literal(13), - ]; - let result = operator::between( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[ - Operand::literal(12.3), - Operand::literal(12.0), - Operand::literal(12.4), - ]; - let result = operator::between( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[ - Operand::literal(11.99), - Operand::literal(12.0), - Operand::literal(13.0), - ]; - let result = operator::between( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[ - Operand::literal(13.0001), - Operand::literal(12.0), - Operand::literal(13.0), - ]; - let result = operator::between( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - // let operands = &[Operand::literal("12.5"), Operand::literal(12), Operand::literal(13)]); - // let result = operator::between(&operands[..], used_elements, elements, address_space).unwrap(); - // assert_eq!(result, Variant::Boolean(true)); - }) -} - -#[test] -fn test_and() { - do_operator_test(|address_space, object_id, used_elements, elements| { - let operands = &[Operand::literal(true), Operand::literal(true)]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(false), Operand::literal(true)]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - 
let operands = &[Operand::literal(true), Operand::literal(false)]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(false), Operand::literal(false)]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(true), Operand::literal(())]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - - let operands = &[Operand::literal(()), Operand::literal(true)]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - - let operands = &[Operand::literal(false), Operand::literal(())]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(()), Operand::literal(false)]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(()), Operand::literal(())]; - let result = operator::and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - }) -} - -#[test] -fn test_or() { - do_operator_test(|address_space, object_id, used_elements, elements| { - let operands = &[Operand::literal(true), Operand::literal(true)]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - 
let operands = &[Operand::literal(true), Operand::literal(false)]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(false), Operand::literal(true)]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(false), Operand::literal(false)]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - - let operands = &[Operand::literal(true), Operand::literal(())]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(()), Operand::literal(true)]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(false), Operand::literal(())]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - - let operands = &[Operand::literal(()), Operand::literal(false)]; - let result = operator::or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Empty); - }) -} - -#[test] -fn test_in_list() { - do_operator_test(|address_space, object_id, used_elements, elements| { - let operands = &[Operand::literal(10), Operand::literal(false)]; - let result = operator::in_list( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, 
Variant::Boolean(false)); - - let operands = &[Operand::literal(true), Operand::literal(false)]; - let result = operator::in_list( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::Boolean(false)); - /* - let operands = &[Operand::literal("true"), Operand::literal(true)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(99), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(()), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(33), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(false)); - */ - }) -} - -#[test] -fn test_bitwise_or() { - do_operator_test(|address_space, object_id, used_elements, elements| { - let operands = &[Operand::literal(0xff00u16), Operand::literal(0x00ffu16)]; - let result = operator::bitwise_or( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, Variant::UInt16(0xffff)); - }) -} - -#[test] -fn test_bitwise_and() { - do_operator_test(|address_space, object_id, used_elements, elements| { - let operands = &[Operand::literal(0xf00fu16), Operand::literal(0x00ffu16)]; - let result = operator::bitwise_and( - &object_id, - &operands[..], - used_elements, - elements, - address_space, - ) - .unwrap(); - assert_eq!(result, 
Variant::UInt16(0x000f)); - }) -} - -#[test] -fn test_where_clause() { - crate::console_logging::init(); - - let address_space = address_space(); - - let object_id = NodeId::root_folder_id(); - - // IsNull(NULL) - let f = ContentFilterBuilder::new() - .null(Operand::literal(())) - .build(); - let result = event_filter::evaluate_where_clause(&object_id, &f, &address_space); - assert_eq!(result.unwrap(), true.into()); - - // (550 == "550") && (10.5 == "10.5") - let f = ContentFilterBuilder::new() - .and(Operand::element(1), Operand::element(2)) - .eq(Operand::literal(550), Operand::literal("550")) - .eq(Operand::literal(10.5), Operand::literal("10.5")) - .build(); - let result = event_filter::evaluate_where_clause(&object_id, &f, &address_space); - assert_eq!(result.unwrap(), true.into()); - - // Like operator - let f = ContentFilterBuilder::new() - .like( - Operand::literal("Hello world"), - Operand::literal("[Hh]ello w%"), - ) - .build(); - let result = event_filter::evaluate_where_clause(&object_id, &f, &address_space); - assert_eq!(result.unwrap(), true.into()); - - // Not equals - let f = ContentFilterBuilder::new() - .not(Operand::element(1)) - .eq(Operand::literal(550), Operand::literal(551)) - .build(); - let result = event_filter::evaluate_where_clause(&object_id, &f, &address_space); - assert_eq!(result.unwrap(), true.into()); - - // Do some relative path comparisons against the event to ensure content filters appear to work - let expected = vec![ - // Valid - (NodeId::root_folder_id(), "Objects/Event100/Foo", 100, true), - (NodeId::objects_folder_id(), "Event100/Foo", 100, true), - (event_id(), "Foo", 100, true), - // Invalid - (NodeId::root_folder_id(), "Objects/Event101/Foo", 100, false), - (NodeId::root_folder_id(), "Objects/Foo", 100, false), - (NodeId::root_folder_id(), "Objects/Event100/Foo", 101, false), - (NodeId::objects_folder_id(), "Event100/Foo", 101, false), - (event_id(), "Foo", 101, false), - (NodeId::objects_folder_id(), "Event100/Foo/Bar", 
100, false), - (event_id(), "", 100, false), - ]; - expected - .into_iter() - .for_each(|(node_id, browse_path, value_to_compare, expected)| { - let f = ContentFilterBuilder::new() - .eq( - Operand::simple_attribute( - ReferenceTypeId::Organizes, - browse_path, - AttributeId::Value, - UAString::null(), - ), - Operand::literal(value_to_compare), - ) - .build(); - let result = event_filter::evaluate_where_clause(&node_id, &f, &address_space); - assert_eq!(result.unwrap(), expected.into()); - }); -} diff --git a/lib/src/server/tests/mod.rs b/lib/src/server/tests/mod.rs deleted file mode 100644 index e19eec7ca..000000000 --- a/lib/src/server/tests/mod.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::{path::PathBuf, sync::Arc}; - -use chrono; - -use crate::{ - core::{config::Config, supported_message::SupportedMessage}, - server::{ - address_space::{address_space::*, variable::*}, - builder::ServerBuilder, - config::ServerConfig, - session::*, - subscriptions::*, - }, - sync::*, - types::{status_code::StatusCode, *}, -}; - -mod address_space; -mod events; -mod services; -mod subscriptions; - -fn make_test_file(filename: &str) -> PathBuf { - let mut path = std::env::temp_dir(); - path.push(filename); - path -} - -fn make_sample_address_space() -> Arc> { - let address_space = Arc::new(RwLock::new(AddressSpace::new())); - add_sample_vars_to_address_space(address_space.clone()); - address_space -} - -fn add_sample_vars_to_address_space(address_space: Arc>) { - let mut address_space = trace_write_lock!(address_space); - - let ns = address_space.register_namespace("urn:test").unwrap(); - - // Create a sample folder under objects folder - let sample_folder_id = address_space - .add_folder("Sample", "Sample", &NodeId::objects_folder_id()) - .unwrap(); - - // Add some variables to our sample folder - let vars = vec![ - Variable::new(&NodeId::new(ns, "v1"), "v1", "v1", 30i32), - Variable::new(&NodeId::new(ns, 300), "v2", "v2", true), - Variable::new( - &NodeId::new(ns, "v3"), - "v3", 
- "v3", - UAString::from("Hello world"), - ), - Variable::new(&NodeId::new(ns, "v4"), "v4", "v4", 100.123f64), - ]; - let _ = address_space.add_variables(vars, &sample_folder_id); -} - -#[test] -pub fn server_config_sample_save() { - // This test only exists to dump a sample config - let config = ServerBuilder::new_sample().config(); - let mut path = std::env::current_dir().unwrap(); - path.push(".."); - path.push("samples"); - path.push("server.conf"); - println!("Path is {:?}", path); - assert!(config.save(&path).is_ok()); -} - -#[test] -pub fn server_config_save() { - let path = make_test_file("server_config.yaml"); - let config = ServerBuilder::new_anonymous("foo").config(); - assert!(config.save(&path).is_ok()); - if let Ok(config2) = ServerConfig::load(&path) { - assert_eq!(config, config2); - } else { - panic!("Cannot load config from file"); - } -} - -#[test] -pub fn server_config_invalid() { - // Remove the endpoint - let mut config = ServerBuilder::new_anonymous("foo").config(); - assert!(config.is_valid()); - config.endpoints.clear(); - assert_eq!(config.is_valid(), false); - - // Insert a nonexistent user - config = ServerBuilder::new_anonymous("foo").config(); - config - .endpoints - .get_mut("none") - .unwrap() - .user_token_ids - .insert("hello".to_string()); - assert_eq!(config.is_valid(), false); -} - -#[test] -pub fn expired_publish_requests() { - let now = chrono::Utc::now(); - let now_plus_5s = now + chrono::Duration::seconds(5); - - // Create two publish requests timestamped now, one which expires in > 30s, one which expires - // in > 20s - let now = DateTime::from(now.clone()); - let mut pr1 = PublishRequestEntry { - request_id: 1, - request: PublishRequest { - request_header: RequestHeader::new(&NodeId::null(), &now, 1000), - subscription_acknowledgements: None, - }, - results: None, - }; - pr1.request.request_header.timeout_hint = 5001; - - let mut pr2 = PublishRequestEntry { - request_id: 2, - request: PublishRequest { - request_header: 
RequestHeader::new(&NodeId::null(), &now, 2000), - subscription_acknowledgements: None, - }, - results: None, - }; - pr2.request.request_header.timeout_hint = 3000; - - // Create session with publish requests - let mut session = Session::new_no_certificate_store(); - - { - let publish_request_queue = session.subscriptions_mut().publish_request_queue(); - publish_request_queue.clear(); - publish_request_queue.push_back(pr1); - publish_request_queue.push_back(pr2); - publish_request_queue - }; - - // Expire requests, see which expire - session.expire_stale_publish_requests(&now_plus_5s); - - // The > 30s timeout hint request should be expired and the other should remain - - // Remain - { - let publish_request_queue = session.subscriptions_mut().publish_request_queue(); - assert_eq!(publish_request_queue.len(), 1); - assert_eq!( - publish_request_queue[0] - .request - .request_header - .request_handle, - 1000 - ); - } - - // Expire - { - let publish_response_queue = session.subscriptions_mut().publish_response_queue(); - assert_eq!(publish_response_queue.len(), 1); - - let r1 = &publish_response_queue[0]; - if let SupportedMessage::ServiceFault(ref response_header) = r1.response { - assert_eq!(response_header.response_header.request_handle, 2000); - assert_eq!( - response_header.response_header.service_result, - StatusCode::BadTimeout - ); - } else { - panic!("Expected service faults for timed out publish requests") - } - } -} diff --git a/lib/src/server/tests/services/attribute.rs b/lib/src/server/tests/services/attribute.rs deleted file mode 100644 index 097756902..000000000 --- a/lib/src/server/tests/services/attribute.rs +++ /dev/null @@ -1,829 +0,0 @@ -use chrono::Duration; - -use crate::server::{address_space::AccessLevel, services::attribute::AttributeService}; -use crate::supported_message_as; -use crate::sync::*; -use crate::types::{Variant, WriteMask}; - -use super::*; - -fn read_value(node_id: &NodeId, attribute_id: AttributeId) -> ReadValueId { - 
ReadValueId { - node_id: node_id.clone(), - attribute_id: attribute_id as u32, - index_range: UAString::null(), - data_encoding: QualifiedName::null(), - } -} - -fn read_value_range( - node_id: &NodeId, - attribute_id: AttributeId, - index_range: UAString, -) -> ReadValueId { - ReadValueId { - node_id: node_id.clone(), - attribute_id: attribute_id as u32, - index_range, - data_encoding: QualifiedName::null(), - } -} - -fn read_value_encoding( - node_id: &NodeId, - attribute_id: AttributeId, - data_encoding: QualifiedName, -) -> ReadValueId { - ReadValueId { - node_id: node_id.clone(), - attribute_id: attribute_id as u32, - index_range: UAString::null(), - data_encoding, - } -} - -fn node_ids(address_space: Arc>) -> Vec { - let (_, node_ids) = add_many_vars_to_address_space(address_space.clone(), 10); - let mut address_space = trace_write_lock!(address_space); - // Remove read access to [3] for a test below - let node = address_space.find_node_mut(&node_ids[3]).unwrap(); - let r = node - .as_mut_node() - .set_attribute(AttributeId::UserAccessLevel, Variant::from(0u8)); - assert!(r.is_ok()); - node_ids -} - -fn do_attribute_service_test(f: F) -where - F: FnOnce( - Arc>, - Arc>, - Arc>, - &AttributeService, - ), -{ - // Set up some nodes - let st = ServiceTest::new(); - f( - st.server_state.clone(), - st.session.clone(), - st.address_space.clone(), - &AttributeService::new(), - ) -} - -#[test] -fn read() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // set up some nodes - let node_ids = node_ids(address_space.clone()); - - { - // Read a non existent variable - let nodes_to_read = vec![ - // 1. a variable - read_value(&node_ids[0], AttributeId::Value), - // 2. an attribute other than value - read_value(&node_ids[1], AttributeId::AccessLevel), - // 3. a variable without the required attribute - read_value(&node_ids[2], AttributeId::IsAbstract), - // 4. a variable with no read access - read_value(&node_ids[3], AttributeId::Value), - // 5. 
a non existent variable - read_value(&NodeId::new(1, "vxxx"), AttributeId::Value), - // 6. using an index range on a non-value - read_value_range(&node_ids[0], AttributeId::AccessLevel, UAString::from("1")), - // 7. invalid encoding - read_value_encoding(&node_ids[0], AttributeId::Value, QualifiedName::from("XYZ")), - ]; - let request = ReadRequest { - request_header: make_request_header(), - max_age: 0f64, - timestamps_to_return: TimestampsToReturn::Both, - nodes_to_read: Some(nodes_to_read), - }; - - let response = ats.read(server_state, session, address_space, &request); - let response: ReadResponse = supported_message_as!(response, ReadResponse); - - // Verify expected values - let results = response.results.unwrap(); - - // 1. a variable value - assert_eq!(results[0].status.as_ref().unwrap(), &StatusCode::Good); - assert_eq!(results[0].value.as_ref().unwrap(), &Variant::Int32(0)); - assert!(results[0].source_timestamp.is_some()); - assert!(results[0].server_timestamp.is_some()); - - // 2. an attribute other than value (access level) - assert_eq!(results[1].value.as_ref().unwrap(), &Variant::Byte(1)); - assert!(results[1].source_timestamp.is_none()); - assert!(results[1].server_timestamp.is_none()); - - // 3. a variable without the required attribute - assert_eq!( - results[2].status.as_ref().unwrap(), - &StatusCode::BadAttributeIdInvalid - ); - assert!(results[2].source_timestamp.is_none()); - assert!(results[2].server_timestamp.is_none()); - - // 4. a variable with no read access - assert_eq!( - results[3].status.as_ref().unwrap(), - &StatusCode::BadNotReadable - ); - assert!(results[3].source_timestamp.is_none()); - assert!(results[3].server_timestamp.is_none()); - - // 5. Non existent - assert_eq!( - results[4].status.as_ref().unwrap(), - &StatusCode::BadNodeIdUnknown - ); - assert!(results[4].source_timestamp.is_none()); - assert!(results[4].server_timestamp.is_none()); - - // 6. 
Index range on a non-value - assert_eq!( - results[5].status.as_ref().unwrap(), - &StatusCode::BadIndexRangeNoData - ); - - // 7. Invalid encoding - assert_eq!( - results[6].status.as_ref().unwrap(), - &StatusCode::BadDataEncodingInvalid - ); - } - - // OTHER POTENTIAL TESTS - - // distinguish between read and user read - // test max_age - // test timestamps to return Server, Source, None, Both - }); -} - -#[test] -fn read_invalid_timestamps() { - // The TimestampsToReturnEnum will be set to Invalid to simulate a decoding error. - // The Read service should return a service fault if timestamps to return is invalid. - - do_attribute_service_test(|server_state, session, address_space, ats| { - // set up some nodes - let node_ids = node_ids(address_space.clone()); - - // Read a non existent variable - let nodes_to_read = vec![read_value(&node_ids[0], AttributeId::Value)]; - let request = ReadRequest { - request_header: make_request_header(), - max_age: 0f64, - timestamps_to_return: TimestampsToReturn::Invalid, // Invalid - nodes_to_read: Some(nodes_to_read), - }; - - let response = ats.read(server_state, session, address_space, &request); - let response = supported_message_as!(response, ServiceFault); - - assert_eq!( - response.response_header.service_result, - StatusCode::BadTimestampsToReturnInvalid - ); - }); -} - -fn write_value(node_id: &NodeId, attribute_id: AttributeId, value: DataValue) -> WriteValue { - WriteValue { - node_id: node_id.clone(), - attribute_id: attribute_id as u32, - index_range: UAString::null(), - value, - } -} - -fn write_value_index_range( - node_id: &NodeId, - attribute_id: AttributeId, - index_range: V, - value: DataValue, -) -> WriteValue -where - V: Into, -{ - WriteValue { - node_id: node_id.clone(), - attribute_id: attribute_id as u32, - index_range: index_range.into(), - value, - } -} - -// Boiler plate helper makes a request and grabs a response -fn write_request( - server_state: Arc>, - session: Arc>, - address_space: Arc>, - ats: 
&AttributeService, - nodes_to_write: Vec, -) -> WriteResponse { - let request = WriteRequest { - request_header: make_request_header(), - nodes_to_write: Some(nodes_to_write), - }; - // do a write - let response = ats.write(server_state, session, address_space.clone(), &request); - supported_message_as!(response, WriteResponse) -} - -// Boiler plate helper to get the node's value for verification -fn validate_variable_value(address_space: Arc>, node_id: &NodeId, f: F) -where - F: FnOnce(&Variant), -{ - let address_space = trace_read_lock!(address_space); - let node = address_space.find_node(&node_id).unwrap(); - if let NodeType::Variable(node) = node { - let value = node.value( - TimestampsToReturn::Neither, - NumericRange::None, - &QualifiedName::null(), - 0., - ); - f(&value.value.unwrap()); - } else { - panic!(); - } -} - -#[test] -fn write() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Set up some nodes - // Create some variable nodes and modify permissions in the address space so we - // can see what happens when they are written to. 
- let node_ids = { - let (_, node_ids) = add_many_vars_to_address_space(address_space.clone(), 10); - let mut address_space = trace_write_lock!(address_space); - // set up nodes for the tests to be performed to each - for (i, node_id) in node_ids.iter().enumerate() { - let node = address_space.find_node_mut(node_id).unwrap(); - match i { - 1 => { - // Add IsAbstract to WriteMask - node.as_mut_node().set_write_mask(WriteMask::IS_ABSTRACT); - } - 2 => { - // Remove write access to the value by setting access level to 0 - let _ = node - .as_mut_node() - .set_attribute(AttributeId::UserAccessLevel, Variant::from(0u8)) - .unwrap(); - } - 6 => { - node.as_mut_node().set_write_mask(WriteMask::ACCESS_LEVEL); - } - _ => { - // Write access - let _ = node - .as_mut_node() - .set_attribute( - AttributeId::AccessLevel, - Variant::from(AccessLevel::CURRENT_WRITE.bits()), - ) - .unwrap(); - let _ = node - .as_mut_node() - .set_attribute( - AttributeId::UserAccessLevel, - Variant::from(UserAccessLevel::CURRENT_WRITE.bits()), - ) - .unwrap(); - } - } - } - - // change HasEncoding node with write access so response can be compared to HasChild which will be left alone - let node = address_space - .find_node_mut(&ReferenceTypeId::HasEncoding.into()) - .unwrap(); - node.as_mut_node().set_write_mask(WriteMask::IS_ABSTRACT); - - node_ids - }; - - let mut data_value_empty = DataValue::new_now(100 as i32); - data_value_empty.value = None; - - // This is a cross section of variables and other kinds of nodes that we want to write to - let nodes_to_write = vec![ - // 1. a variable value - write_value( - &node_ids[0], - AttributeId::Value, - DataValue::new_now(100 as i32), - ), - // 2. a variable with a bad attribute (IsAbstract doesn't exist on a var) - write_value( - &node_ids[1], - AttributeId::IsAbstract, - DataValue::new_now(true), - ), - // 3. a variable value which has no write access - write_value( - &node_ids[2], - AttributeId::Value, - DataValue::new_now(200 as i32), - ), - // 4. 
a node of some kind other than variable - write_value( - &ReferenceTypeId::HasEncoding.into(), - AttributeId::IsAbstract, - DataValue::new_now(false), - ), - // 5. a node with some kind other than variable with no write mask - write_value( - &ReferenceTypeId::HasChild.into(), - AttributeId::IsAbstract, - DataValue::new_now(false), - ), - // 6. a non existent variable - write_value( - &NodeId::new(2, "vxxx"), - AttributeId::Value, - DataValue::new_now(100i32), - ), - // 7. wrong type for attribute - write_value( - &node_ids[6], - AttributeId::AccessLevel, - DataValue::new_now(-1i8), - ), - // 8. a data value with no value - write_value(&node_ids[7], AttributeId::Value, data_value_empty), - ]; - - let nodes_to_write_len = nodes_to_write.len(); - - let response = write_request( - server_state, - session, - address_space.clone(), - ats, - nodes_to_write, - ); - let results = response.results.unwrap(); - assert_eq!(results.len(), nodes_to_write_len); - - // 1. a variable value - assert_eq!(results[0], StatusCode::Good); - // 2. a variable with invalid attribute - assert_eq!(results[1], StatusCode::BadAttributeIdInvalid); - // 3. a variable value which has no write access - assert_eq!(results[2], StatusCode::BadNotWritable); - // 4. a node of some kind other than variable - assert_eq!(results[3], StatusCode::Good); - // 5. a node with some kind other than variable with no write mask - assert_eq!(results[4], StatusCode::BadNotWritable); - // 6. a non existent variable - assert_eq!(results[5], StatusCode::BadNodeIdUnknown); - // 7. wrong type for attribute - assert_eq!(results[6], StatusCode::BadTypeMismatch); - // 8. 
a data value with no value - assert_eq!(results[7], StatusCode::BadTypeMismatch); - - // OTHER POTENTIAL TESTS - - // distinguish between write and user write - // test max_age - }); -} - -#[test] -fn write_bytestring_to_byte_array() { - // This test checks that writing a byte string to a byte array variable works - do_attribute_service_test(|server_state, session, address_space, ats| { - // Create a variable that is an array of bytes - let node_id = NodeId::next_numeric(2); - { - let mut address_space = trace_write_lock!(address_space); - let _ = VariableBuilder::new(&node_id, var_name(0), "") - .data_type(DataTypeId::Byte) - .value_rank(1) - .value(vec![0u8; 16]) - .organized_by(ObjectId::RootFolder) - .writable() - .insert(&mut address_space); - } - - let bytes = ByteString::from(vec![0x1u8, 0x2u8, 0x3u8, 0x4u8]); - let nodes_to_write = vec![write_value( - &node_id, - AttributeId::Value, - DataValue::new_now(bytes), - )]; - - // Do a write - let response = write_request( - server_state, - session, - address_space.clone(), - ats, - nodes_to_write, - ); - let results = response.results.unwrap(); - - // Expect the write to have succeeded - assert_eq!(results[0], StatusCode::Good); - - // Test the node expecting it to be an array with 4 Byte values - validate_variable_value(address_space, &node_id, |value| match value { - Variant::Array(array) => { - let values = &array.values; - assert_eq!(values.len(), 4); - assert_eq!(values[0], Variant::Byte(0x1u8)); - assert_eq!(values[1], Variant::Byte(0x2u8)); - assert_eq!(values[2], Variant::Byte(0x3u8)); - assert_eq!(values[3], Variant::Byte(0x4u8)); - } - _ => panic!(), - }); - }); -} - -#[test] -fn write_index_range() { - // Test that writing to an index in an array works - do_attribute_service_test(|server_state, session, address_space, ats| { - // Create a variable that is an array of bytes - let node_id_1 = NodeId::next_numeric(2); - let node_id_2 = NodeId::next_numeric(2); - - [&node_id_1, &node_id_2] - .iter() - 
.enumerate() - .for_each(|(i, node_id)| { - let mut address_space = trace_write_lock!(address_space); - let _ = VariableBuilder::new(node_id, var_name(i), "") - .data_type(DataTypeId::Byte) - .value_rank(1) - .value(vec![0u8; 16]) - .organized_by(ObjectId::RootFolder) - .writable() - .insert(&mut address_space); - }); - - let index: usize = 12; - let index_expected_value = 73u8; - let index_bytes = Variant::from(vec![index_expected_value]); - - let (range_min, range_max) = (4 as usize, 12 as usize); - let range_bytes = vec![ - 0x1u8, 0x2u8, 0x3u8, 0x4u8, 0x5u8, 0x6u8, 0x7u8, 0x8u8, 0x9u8, - ]; - let range_value = Variant::from(range_bytes.clone()); - - let nodes_to_write = vec![ - write_value_index_range( - &node_id_1, - AttributeId::Value, - format!("{}", index), - DataValue::new_now(index_bytes), - ), - write_value_index_range( - &node_id_2, - AttributeId::Value, - format!("{}:{}", range_min, range_max), - DataValue::new_now(range_value), - ), - ]; - - // Do a write - let response = write_request( - server_state, - session, - address_space.clone(), - ats, - nodes_to_write, - ); - let results = response.results.unwrap(); - - // Expect the write to have succeeded - assert_eq!(results[0], StatusCode::Good); - assert_eq!(results[1], StatusCode::Good); - - validate_variable_value(address_space.clone(), &node_id_1, |value| { - match value { - Variant::Array(array) => { - let values = &array.values; - assert_eq!(values.len(), 16); - values.iter().enumerate().for_each(|(i, v)| { - // Only one element set, others should not be set - let expected = if i == index { - index_expected_value - } else { - 0u8 - }; - assert_eq!(*v, Variant::Byte(expected)); - }); - } - _ => panic!(), - } - }); - - validate_variable_value(address_space, &node_id_2, |value| { - match value { - Variant::Array(array) => { - let values = &array.values; - assert_eq!(values.len(), 16); - // Inside the range, expect the values - values.iter().enumerate().for_each(|(i, v)| { - let expected = if i >= 
range_min && i <= range_max { - range_bytes[i - range_min] - } else { - 0u8 - }; - assert_eq!(*v, Variant::Byte(expected)); - }); - } - _ => panic!(), - } - }); - }); -} - -// #[test] fn write_null_value() { /* Write an empty variant to a value and see that it is allowed */} - -struct DataProvider; - -impl HistoricalDataProvider for DataProvider { - fn read_raw_modified_details( - &self, - _address_space: Arc>, - _request: ReadRawModifiedDetails, - _timestamps_to_return: TimestampsToReturn, - _release_continuation_points: bool, - _nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - info!("DataProvider's read_raw_modified_details"); - Ok(DataProvider::historical_read_result()) - } - - fn delete_raw_modified_details( - &self, - _address_space: Arc>, - _request: DeleteRawModifiedDetails, - ) -> Result, StatusCode> { - info!("DataProvider's delete_raw_modified_details"); - Ok(vec![StatusCode::Good]) - } -} - -impl DataProvider { - pub fn historical_read_result() -> Vec { - vec![HistoryReadResult { - status_code: StatusCode::Good, - continuation_point: ByteString::null(), - history_data: ExtensionObject::null(), - }] - } -} - -fn nodes_to_read() -> Vec { - vec![HistoryReadValueId { - node_id: NodeId::new(2, "test"), - index_range: UAString::null(), - data_encoding: QualifiedName::null(), // TODO - continuation_point: ByteString::null(), - }] -} - -fn read_raw_modified_details() -> ReadRawModifiedDetails { - // Register a history data provider - let now = chrono::Utc::now(); - let start_time = (now - Duration::days(5)).into(); - let end_time = now.into(); - - ReadRawModifiedDetails { - is_read_modified: true, - start_time, - end_time, - num_values_per_node: 100u32, - return_bounds: true, - } -} - -#[test] -fn history_read_nothing_to_do_1() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Register a history data provider - // Send a valid read details command but with no nodes to read - let read_raw_modified_details = 
read_raw_modified_details(); - let history_read_details = ExtensionObject::from_encodable( - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, - &read_raw_modified_details, - ); - let request = HistoryReadRequest { - request_header: make_request_header(), - history_read_details, - timestamps_to_return: TimestampsToReturn::Both, - release_continuation_points: true, - nodes_to_read: None, - }; - let response: ServiceFault = supported_message_as!( - ats.history_read(server_state, session, address_space.clone(), &request), - ServiceFault - ); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - }); -} - -#[test] -fn history_read_nothing_history_operation_invalid() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Send a command with an invalid extension object - let request = HistoryReadRequest { - request_header: make_request_header(), - history_read_details: ExtensionObject::null(), - timestamps_to_return: TimestampsToReturn::Both, - release_continuation_points: true, - nodes_to_read: Some(nodes_to_read()), - }; - let response: ServiceFault = supported_message_as!( - ats.history_read(server_state, session, address_space, &request), - ServiceFault - ); - assert_eq!( - response.response_header.service_result, - StatusCode::BadHistoryOperationInvalid - ); - }); -} - -#[test] -fn history_read_nothing_data_provider() { - do_attribute_service_test(|server_state, session, address_space, ats| { - { - let mut server_state = server_state.write(); - let data_provider = DataProvider; - server_state.set_historical_data_provider(Box::new(data_provider)); - } - - // Call ReadRawModifiedDetails on the registered callback and expect a call back - let read_raw_modified_details = read_raw_modified_details(); - let history_read_details = ExtensionObject::from_encodable( - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, - &read_raw_modified_details, - ); - let request = HistoryReadRequest { - 
request_header: make_request_header(), - history_read_details, - timestamps_to_return: TimestampsToReturn::Both, - release_continuation_points: true, - nodes_to_read: Some(nodes_to_read()), - }; - let response: HistoryReadResponse = supported_message_as!( - ats.history_read(server_state, session, address_space, &request), - HistoryReadResponse - ); - let expected_read_result = DataProvider::historical_read_result(); - assert_eq!(response.results, Some(expected_read_result)); - }); -} - -fn delete_raw_modified_details() -> DeleteRawModifiedDetails { - let now = chrono::Utc::now(); - let start_time = (now - Duration::days(5)).into(); - let end_time = now.into(); - DeleteRawModifiedDetails { - node_id: NodeId::new(2, 100), - is_delete_modified: true, - start_time, - end_time, - } -} - -#[test] -fn history_update_nothing_to_do_1() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Nothing to do - let request = HistoryUpdateRequest { - request_header: make_request_header(), - history_update_details: None, - }; - let response: ServiceFault = supported_message_as!( - ats.history_update(server_state, session, address_space, &request), - ServiceFault - ); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - }); -} - -#[test] -fn history_update_nothing_to_do_2() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Nothing to do /2 - let request = HistoryUpdateRequest { - request_header: make_request_header(), - history_update_details: Some(vec![]), - }; - let response: ServiceFault = supported_message_as!( - ats.history_update(server_state, session, address_space, &request), - ServiceFault - ); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - }); -} - -#[test] -fn history_update_history_operation_invalid() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Invalid extension object - let request = 
HistoryUpdateRequest { - request_header: make_request_header(), - history_update_details: Some(vec![ExtensionObject::null()]), - }; - let response: HistoryUpdateResponse = supported_message_as!( - ats.history_update(server_state, session, address_space, &request), - HistoryUpdateResponse - ); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - - let result1 = &results[0]; - assert_eq!(result1.status_code, StatusCode::BadHistoryOperationInvalid); - }); -} - -#[test] -fn history_update_history_operation_unsupported() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Create an update action - let delete_raw_modified_details = delete_raw_modified_details(); - - // Unsupported operation (everything by default) - let history_update_details = ExtensionObject::from_encodable( - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, - &delete_raw_modified_details, - ); - let request = HistoryUpdateRequest { - request_header: make_request_header(), - history_update_details: Some(vec![history_update_details]), - }; - let response: HistoryUpdateResponse = supported_message_as!( - ats.history_update(server_state, session, address_space, &request), - HistoryUpdateResponse - ); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - - let result1 = &results[0]; - assert_eq!( - result1.status_code, - StatusCode::BadHistoryOperationUnsupported - ); - }); -} - -#[test] -fn history_update_data_provider() { - do_attribute_service_test(|server_state, session, address_space, ats| { - // Register a data provider - { - let mut server_state = server_state.write(); - let data_provider = DataProvider; - server_state.set_historical_data_provider(Box::new(data_provider)); - } - - let delete_raw_modified_details = delete_raw_modified_details(); - - // Supported operation - let history_update_details = ExtensionObject::from_encodable( - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, - 
&delete_raw_modified_details, - ); - let request = HistoryUpdateRequest { - request_header: make_request_header(), - history_update_details: Some(vec![history_update_details]), - }; - let response: HistoryUpdateResponse = supported_message_as!( - ats.history_update(server_state, session, address_space, &request), - HistoryUpdateResponse - ); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - - let result1 = &results[0]; - assert_eq!(result1.status_code, StatusCode::Good); - }); -} diff --git a/lib/src/server/tests/services/discovery.rs b/lib/src/server/tests/services/discovery.rs deleted file mode 100644 index 1553b493e..000000000 --- a/lib/src/server/tests/services/discovery.rs +++ /dev/null @@ -1,122 +0,0 @@ -use crate::server::services::discovery::DiscoveryService; -use crate::supported_message_as; -use crate::sync::*; -use crate::types::UAString; - -use super::*; - -fn do_discovery_service_test(f: F) -where - F: FnOnce(Arc>, Arc>, &DiscoveryService), -{ - let st = ServiceTest::new(); - let (server_state, session) = st.get_server_state_and_session(); - - let ds = DiscoveryService::new(); - - f(server_state, session, &ds); -} - -#[test] -fn get_endpoints() { - do_discovery_service_test(|server_state, _session, ds| { - let request = GetEndpointsRequest { - request_header: make_request_header(), - endpoint_url: UAString::from("opc.tcp://localhost:4855/"), - locale_ids: None, - profile_uris: None, - }; - - let result = ds.get_endpoints(server_state, &request); - let result = supported_message_as!(result, GetEndpointsResponse); - - // Verify endpoints - let endpoints = result.endpoints.unwrap(); - assert!(!endpoints.is_empty()); - - debug!("Endpoints = {:#?}", endpoints); - }); -} - -#[test] -fn find_servers() { - do_discovery_service_test(|server_state, _session, ds| { - // This is a very basic test - let request = FindServersRequest { - request_header: make_request_header(), - endpoint_url: Default::default(), - locale_ids: None, - 
server_uris: None, - }; - let result = ds.find_servers(server_state, &request); - - let response = supported_message_as!(result, FindServersResponse); - let servers = response.servers.unwrap(); - assert_eq!(servers.len(), 1); - - // Verify application servers have the fields we expect - servers.iter().for_each(|s| { - let discovery_urls = s.discovery_urls.as_ref().unwrap(); - assert!(!discovery_urls.is_empty()); - assert_eq!(s.application_type, ApplicationType::Server); - assert_eq!(s.application_name.text.as_ref(), "OPC UA Sample Server"); - assert_eq!(s.application_uri.as_ref(), "urn:OPC UA Sample Server"); - assert_eq!(s.product_uri.as_ref(), "urn:OPC UA Sample Server Testkit"); - }); - - // TODO other requests should exercise those filters - }); -} - -#[test] -fn discovery_test() { - do_discovery_service_test(|server_state, _session, ds| { - let endpoint_url = UAString::from("opc.tcp://localhost:4855/"); - { - let request = GetEndpointsRequest { - request_header: make_request_header(), - endpoint_url: endpoint_url.clone(), - locale_ids: None, - profile_uris: None, - }; - - let result = ds.get_endpoints(server_state.clone(), &request); - let result = supported_message_as!(result, GetEndpointsResponse); - - // Verify endpoints - let endpoints = result.endpoints.unwrap(); - assert!(!endpoints.is_empty()); - assert_eq!(endpoints.len(), 12); - } - - // specify profile ids in request - { - // Enter some nonsensical profile uris and expect nothing back - let profile_uris = vec![UAString::from("xxxxxx")]; - let request = GetEndpointsRequest { - request_header: make_request_header(), - endpoint_url: endpoint_url.clone(), - locale_ids: None, - profile_uris: Some(profile_uris), - }; - let result = ds.get_endpoints(server_state.clone(), &request); - let result = supported_message_as!(result, GetEndpointsResponse); - assert!(result.endpoints.is_none()); - - // Enter the binary transport profile and expect the endpoints - let profile_uris = vec![UAString::from( - 
"http://opcfoundation.org/UA-Profile/Transport/uatcp-uasc-uabinary", - )]; - let request = GetEndpointsRequest { - request_header: make_request_header(), - endpoint_url: endpoint_url.clone(), - locale_ids: None, - profile_uris: Some(profile_uris), - }; - let result = ds.get_endpoints(server_state.clone(), &request); - let result = supported_message_as!(result, GetEndpointsResponse); - let endpoints = result.endpoints.unwrap(); - assert!(!endpoints.is_empty()) - } - }); -} diff --git a/lib/src/server/tests/services/method.rs b/lib/src/server/tests/services/method.rs deleted file mode 100644 index 2ffbc02f7..000000000 --- a/lib/src/server/tests/services/method.rs +++ /dev/null @@ -1,392 +0,0 @@ -use crate::supported_message_as; -use crate::sync::*; -use crate::types::{ - node_ids::{MethodId, ObjectId}, - service_types::{CallMethodRequest, CallMethodResult, CallRequest, CallResponse}, - status_code::StatusCode, -}; - -use crate::server::services::{ - method::MethodService, monitored_item::MonitoredItemService, subscription::SubscriptionService, -}; - -use super::*; - -fn do_method_service_test(f: F) -where - F: FnOnce( - Arc>, - Arc>, - Arc>, - Arc>, - &MethodService, - ), -{ - let st = ServiceTest::new(); - - let s = MethodService::new(); - - let (server_state, session) = st.get_server_state_and_session(); - let address_space = st.address_space.clone(); - let session_manager = st.session_manager.clone(); - f(server_state, session_manager, session, address_space, &s); -} - -fn new_call_method_request( - object_id: S, - method_id: T, - input_arguments: Option>, -) -> CallMethodRequest -where - S: Into, - T: Into, -{ - CallMethodRequest { - object_id: object_id.into(), - method_id: method_id.into(), - input_arguments, - } -} - -fn create_subscription_request() -> CreateSubscriptionRequest { - CreateSubscriptionRequest { - request_header: RequestHeader::dummy(), - requested_publishing_interval: 100f64, - requested_lifetime_count: 100, - requested_max_keep_alive_count: 
100, - max_notifications_per_publish: 5, - publishing_enabled: true, - priority: 0, - } -} - -fn create_monitored_items_request( - subscription_id: u32, - client_handle: u32, - node_id: T, -) -> CreateMonitoredItemsRequest -where - T: 'static + Into, -{ - CreateMonitoredItemsRequest { - request_header: RequestHeader::dummy(), - subscription_id, - timestamps_to_return: TimestampsToReturn::Both, - items_to_create: Some(vec![MonitoredItemCreateRequest { - item_to_monitor: ReadValueId { - node_id: node_id.into(), - attribute_id: AttributeId::Value as u32, - index_range: UAString::null(), - data_encoding: QualifiedName::null(), - }, - monitoring_mode: MonitoringMode::Reporting, - requested_parameters: MonitoringParameters { - client_handle, - sampling_interval: 0.1, - filter: ExtensionObject::null(), - queue_size: 1, - discard_oldest: true, - }, - }]), - } -} - -/// This is a convenience for tests -fn call_single( - s: &MethodService, - server_state: Arc>, - session_manager: Arc>, - session: Arc>, - address_space: Arc>, - request: CallMethodRequest, -) -> Result { - let session_id = { - let session = trace_read_lock!(session); - session.session_id().clone() - }; - let response = s.call( - server_state, - &session_id, - session_manager, - address_space, - &CallRequest { - request_header: RequestHeader::dummy(), - methods_to_call: Some(vec![request]), - }, - ); - let response: CallResponse = supported_message_as!(response, CallResponse); - Ok(response.results.unwrap().remove(0)) -} - -#[test] -fn call_getmonitoreditems_invalid_object_id() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call without a valid object id - let request = - new_call_method_request(NodeId::null(), MethodId::Server_GetMonitoredItems, None); - let response = call_single( - s, - server_state, - session_manager, - session, - address_space, - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadNodeIdUnknown); - }); -} - -#[test] -fn 
call_getmonitoreditems_invalid_method_id() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call without a valid method id - let request = new_call_method_request(ObjectId::Server, NodeId::null(), None); - let response = call_single( - s, - server_state, - session_manager, - session, - address_space, - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadMethodInvalid); - }); -} - -#[test] -fn call_getmonitoreditems_no_args() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call without args - let request = - new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, None); - let response = call_single( - s, - server_state, - session_manager, - session, - address_space, - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadArgumentsMissing); - }); -} - -#[test] -fn call_getmonitoreditems_too_many_args() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call with too many args - let args: Vec = vec![100.into(), 100.into()]; - let request = new_call_method_request( - ObjectId::Server, - MethodId::Server_GetMonitoredItems, - Some(args), - ); - let response = call_single( - s, - server_state, - session_manager, - session, - address_space, - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadTooManyArguments); - }); -} - -#[test] -fn call_getmonitoreditems_incorrect_args() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call with incorrect arg - let args: Vec = vec![100u8.into()]; - let request = new_call_method_request( - ObjectId::Server, - MethodId::Server_GetMonitoredItems, - Some(args), - ); - let response = call_single( - s, - server_state, - session_manager, - session, - address_space, - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadInvalidArgument); - }); -} - -#[test] 
-fn call_getmonitoreditems_invalid_subscription_id() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call with invalid subscription id - let args: Vec = vec![100u32.into()]; - let request = new_call_method_request( - ObjectId::Server, - MethodId::Server_GetMonitoredItems, - Some(args), - ); - let response = call_single( - s, - server_state, - session_manager, - session, - address_space, - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadSubscriptionIdInvalid); - }); -} - -#[test] -fn call_getmonitoreditems() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call with valid subscription id - { - let ss = SubscriptionService::new(); - let mis = MonitoredItemService::new(); - - // Create a subscription with some monitored items where client handle is distinct - let subscription_id = { - let request = create_subscription_request(); - let response: CreateSubscriptionResponse = supported_message_as!( - ss.create_subscription(server_state.clone(), session.clone(), &request), - CreateSubscriptionResponse - ); - response.subscription_id - }; - - // Create a monitored item - let monitored_item_id = { - let request = create_monitored_items_request( - subscription_id, - 999, - VariableId::Server_ServerStatus_CurrentTime, - ); - let response: CreateMonitoredItemsResponse = supported_message_as!( - mis.create_monitored_items( - server_state.clone(), - session.clone(), - address_space.clone(), - &request - ), - CreateMonitoredItemsResponse - ); - response.results.unwrap()[0].monitored_item_id - }; - - // Call to get monitored items and verify handles - let args: Vec = vec![subscription_id.into()]; - let request = new_call_method_request( - ObjectId::Server, - MethodId::Server_GetMonitoredItems, - Some(args), - ); - let response = call_single( - s, - server_state.clone(), - session_manager.clone(), - session.clone(), - address_space.clone(), - request, - ) - 
.unwrap(); - assert_eq!(response.status_code, StatusCode::Good); - - // There should be two output args, each a vector of u32 - let mut result = response.output_arguments.unwrap(); - let server_handles = result.remove(0); - let client_handles = result.remove(0); - - if let Variant::Array(array) = server_handles { - let mut values = array.values; - assert_eq!(values.len(), 1); - assert_eq!(Variant::from(monitored_item_id), values.pop().unwrap()); - } else { - assert!(false); - } - - if let Variant::Array(array) = client_handles { - let mut values = array.values; - assert_eq!(values.len(), 1); - assert_eq!(Variant::from(999u32), values.pop().unwrap()); - } else { - assert!(false); - } - } - }); -} - -#[test] -fn call_resend_data() { - do_method_service_test(|server_state, session_manager, session, address_space, s| { - // Call without a valid object id - { - let request = - new_call_method_request(NodeId::null(), MethodId::Server_ResendData, None); - let response = call_single( - s, - server_state.clone(), - session_manager.clone(), - session.clone(), - address_space.clone(), - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadNodeIdUnknown); - } - - // Call with invalid subscription id - { - let args: Vec = vec![100u32.into()]; - let request = - new_call_method_request(ObjectId::Server, MethodId::Server_ResendData, Some(args)); - let response = call_single( - s, - server_state.clone(), - session_manager.clone(), - session.clone(), - address_space.clone(), - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::BadSubscriptionIdInvalid); - } - - // Call with valid subscription id - { - let ss = SubscriptionService::new(); - let _mis = MonitoredItemService::new(); - - // Create a subscription with some monitored items where client handle is distinct - let subscription_id = { - let request = create_subscription_request(); - let response: CreateSubscriptionResponse = supported_message_as!( - 
ss.create_subscription(server_state.clone(), session.clone(), &request), - CreateSubscriptionResponse - ); - response.subscription_id - }; - - // Call to get monitored items and verify handles - let args: Vec = vec![subscription_id.into()]; - let request = - new_call_method_request(ObjectId::Server, MethodId::Server_ResendData, Some(args)); - let response = call_single( - s, - server_state.clone(), - session_manager.clone(), - session.clone(), - address_space.clone(), - request, - ) - .unwrap(); - assert_eq!(response.status_code, StatusCode::Good); - } - }); -} diff --git a/lib/src/server/tests/services/mod.rs b/lib/src/server/tests/services/mod.rs deleted file mode 100644 index 1b7ac4c5e..000000000 --- a/lib/src/server/tests/services/mod.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::sync::Arc; - -use crate::server::{ - prelude::*, - services::{monitored_item::MonitoredItemService, subscription::SubscriptionService}, - session::Session, - state::ServerState, - tests::*, -}; - -struct ServiceTest { - pub server_state: Arc>, - pub address_space: Arc>, - pub session: Arc>, - pub session_manager: Arc>, -} - -impl ServiceTest { - pub fn new() -> ServiceTest { - Self::new_with_server(ServerBuilder::new_sample()) - } - - pub fn new_with_server(server_builder: ServerBuilder) -> ServiceTest { - let server = server_builder.server().unwrap(); - let server_state = server.server_state(); - let address_space = server.address_space(); - let session = Arc::new(RwLock::new(Session::new(server_state.clone()))); - let session_manager = Arc::new(RwLock::new(SessionManager::default())); - - { - let mut session_manager = trace_write_lock!(session_manager); - session_manager.register_session(session.clone()); - } - - ServiceTest { - server_state, - address_space, - session, - session_manager, - } - } - - pub fn get_server_state_and_session(&self) -> (Arc>, Arc>) { - (self.server_state.clone(), self.session.clone()) - } -} - -fn make_request_header() -> RequestHeader { - RequestHeader { - 
authentication_token: NodeId::new(0, 99), - timestamp: DateTime::now(), - request_handle: 1, - return_diagnostics: DiagnosticBits::empty(), - audit_entry_id: UAString::null(), - timeout_hint: 123456, - additional_header: ExtensionObject::null(), - } -} - -fn var_name(idx: usize) -> String { - format!("v{}", idx) -} - -fn var_node_id(idx: usize) -> NodeId { - NodeId::new(1, var_name(idx)) -} - -fn add_many_vars_to_address_space( - address_space: Arc>, - vars_to_add: usize, -) -> (NodeId, Vec) { - let mut address_space = trace_write_lock!(address_space); - - // Create a sample folder under objects folder - let sample_folder_id = address_space - .add_folder("Many Vars", "Many Vars", &NodeId::objects_folder_id()) - .unwrap(); - - // Add as a bunch of sequential vars to the folder - let node_ids: Vec = (0..vars_to_add) - .map(|i| { - let node_id = var_node_id(i); - let _ = VariableBuilder::new(&node_id, var_name(i), "") - .data_type(DataTypeId::Int32) - .organized_by(&sample_folder_id) - .value(i as i32) - .insert(&mut address_space); - node_id - }) - .collect(); - - (sample_folder_id, node_ids) -} - -/// A helper that sets up a subscription service test -fn do_subscription_service_test(f: T) -where - T: FnOnce( - Arc>, - Arc>, - Arc>, - SubscriptionService, - MonitoredItemService, - ), -{ - let st = ServiceTest::new(); - add_many_vars_to_address_space(st.address_space.clone(), 100); - f( - st.server_state.clone(), - st.session.clone(), - st.address_space.clone(), - SubscriptionService::new(), - MonitoredItemService::new(), - ); -} - -/// Creates a blank subscription request -fn create_subscription_request( - max_keep_alive_count: u32, - lifetime_count: u32, -) -> CreateSubscriptionRequest { - CreateSubscriptionRequest { - request_header: RequestHeader::dummy(), - requested_publishing_interval: 100f64, - requested_lifetime_count: lifetime_count, - requested_max_keep_alive_count: max_keep_alive_count, - max_notifications_per_publish: 5, - publishing_enabled: true, - 
priority: 0, - } -} - -/// Creates a monitored item request -fn create_monitored_items_request( - subscription_id: u32, - node_id: Vec, -) -> CreateMonitoredItemsRequest -where - T: Into, -{ - let items_to_create = Some( - node_id - .into_iter() - .enumerate() - .map(|i| { - let node_id: NodeId = i.1.into(); - MonitoredItemCreateRequest { - item_to_monitor: node_id.into(), - monitoring_mode: MonitoringMode::Reporting, - requested_parameters: MonitoringParameters { - client_handle: i.0 as u32, - sampling_interval: 0.1, - filter: ExtensionObject::null(), - queue_size: 1, - discard_oldest: true, - }, - } - }) - .collect::>(), - ); - CreateMonitoredItemsRequest { - request_header: RequestHeader::dummy(), - subscription_id, - timestamps_to_return: TimestampsToReturn::Both, - items_to_create, - } -} - -pub mod attribute; -pub mod discovery; -pub mod method; -pub mod monitored_item; -pub mod node_management; -pub mod session; -pub mod subscription; -pub mod view; diff --git a/lib/src/server/tests/services/monitored_item.rs b/lib/src/server/tests/services/monitored_item.rs deleted file mode 100644 index 24fc49bd4..000000000 --- a/lib/src/server/tests/services/monitored_item.rs +++ /dev/null @@ -1,1025 +0,0 @@ -use std::collections::HashSet; -use std::ops::Add; - -use chrono::{self, Utc}; - -use super::*; -use crate::{ - server::{ - services::{monitored_item::MonitoredItemService, subscription::SubscriptionService}, - subscriptions::{ - monitored_item::*, - subscription::{SubscriptionState, TickReason}, - }, - }, - supported_message_as, -}; - -fn test_var_node_id() -> NodeId { - NodeId::new(1, 1) -} - -fn test_object_node_id() -> NodeId { - NodeId::new(1, 1000) -} - -fn make_address_space() -> AddressSpace { - let mut address_space = AddressSpace::new(); - - (1..=5).for_each(|i| { - let id = format!("test{}", i); - VariableBuilder::new(&NodeId::new(1, i), &id, &id) - .data_type(DataTypeId::UInt32) - .value(0u32) - .organized_by(ObjectId::ObjectsFolder) - .insert(&mut 
address_space); - }); - - // An object for event filter - ObjectBuilder::new(&test_object_node_id(), "Object1", "") - .organized_by(ObjectId::ObjectsFolder) - .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) - .insert(&mut address_space); - - address_space -} - -fn make_create_request( - sampling_interval: f64, - queue_size: u32, - node_id: NodeId, - attribute_id: AttributeId, - filter: ExtensionObject, -) -> MonitoredItemCreateRequest { - MonitoredItemCreateRequest { - item_to_monitor: ReadValueId { - node_id, - attribute_id: attribute_id as u32, - index_range: UAString::null(), - data_encoding: QualifiedName::null(), - }, - monitoring_mode: MonitoringMode::Reporting, - requested_parameters: MonitoringParameters { - client_handle: 999, - sampling_interval, - filter, - queue_size, - discard_oldest: true, - }, - } -} - -fn make_create_request_data_change_filter( - sampling_interval: f64, - queue_size: u32, -) -> MonitoredItemCreateRequest { - // Encode a filter to an extension object - let filter = ExtensionObject::from_encodable( - ObjectId::DataChangeFilter_Encoding_DefaultBinary, - &DataChangeFilter { - trigger: DataChangeTrigger::StatusValueTimestamp, - deadband_type: DeadbandType::None as u32, - deadband_value: 0f64, - }, - ); - make_create_request( - sampling_interval, - queue_size, - test_var_node_id(), - AttributeId::Value, - filter, - ) -} - -fn make_create_request_event_filter( - sampling_interval: f64, - queue_size: u32, -) -> MonitoredItemCreateRequest { - let filter = ExtensionObject::from_encodable( - ObjectId::EventFilter_Encoding_DefaultBinary, - &EventFilter { - where_clause: ContentFilter { elements: None }, - select_clauses: Some(vec![ - SimpleAttributeOperand::new( - ObjectTypeId::BaseEventType, - "EventId", - AttributeId::Value, - UAString::null(), - ), - SimpleAttributeOperand::new( - ObjectTypeId::BaseEventType, - "SourceNode", - AttributeId::Value, - UAString::null(), - ), - ]), - }, - ); - make_create_request( - sampling_interval, - 
queue_size, - test_object_node_id(), - AttributeId::EventNotifier, - filter, - ) -} - -fn set_monitoring_mode( - session: Arc>, - subscription_id: u32, - monitored_item_id: u32, - monitoring_mode: MonitoringMode, - mis: &MonitoredItemService, -) { - let request = SetMonitoringModeRequest { - request_header: RequestHeader::dummy(), - subscription_id, - monitoring_mode, - monitored_item_ids: Some(vec![monitored_item_id]), - }; - let response: SetMonitoringModeResponse = supported_message_as!( - mis.set_monitoring_mode(session, &request), - SetMonitoringModeResponse - ); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!(results[0], StatusCode::Good); -} - -fn set_triggering( - session: Arc>, - subscription_id: u32, - monitored_item_id: u32, - links_to_add: &[u32], - links_to_remove: &[u32], - mis: &MonitoredItemService, -) -> (Option>, Option>) { - let request = SetTriggeringRequest { - request_header: RequestHeader::dummy(), - subscription_id, - triggering_item_id: monitored_item_id, - links_to_add: if links_to_add.is_empty() { - None - } else { - Some(links_to_add.to_vec()) - }, - links_to_remove: if links_to_remove.is_empty() { - None - } else { - Some(links_to_remove.to_vec()) - }, - }; - let response: SetTriggeringResponse = - supported_message_as!(mis.set_triggering(session, &request), SetTriggeringResponse); - (response.add_results, response.remove_results) -} - -fn publish_request( - now: &DateTimeUtc, - session: Arc>, - address_space: Arc>, - ss: &SubscriptionService, -) { - let request_id = 1001; - let request = PublishRequest { - request_header: RequestHeader::dummy(), - subscription_acknowledgements: None, - }; - - { - let mut session = trace_write_lock!(session); - session.subscriptions_mut().publish_request_queue().clear(); - } - - let response = ss.async_publish( - now, - session.clone(), - address_space.clone(), - request_id, - &request, - ); - assert!(response.is_none()); - - let mut session = 
trace_write_lock!(session); - assert!(!session - .subscriptions_mut() - .publish_request_queue() - .is_empty()); -} - -fn publish_response(session: Arc>) -> PublishResponse { - let mut session = trace_write_lock!(session); - let response = session - .subscriptions_mut() - .publish_response_queue() - .pop_back() - .unwrap() - .response; - let response: PublishResponse = supported_message_as!(response, PublishResponse); - response -} - -fn publish_tick_no_response( - session: Arc>, - ss: &SubscriptionService, - address_space: Arc>, - now: DateTimeUtc, - duration: chrono::Duration, -) -> DateTimeUtc { - publish_request(&now, session.clone(), address_space.clone(), ss); - let now = now.add(duration); - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); - assert_eq!( - session.subscriptions_mut().publish_response_queue().len(), - 0 - ); - now -} - -/// Does a publish, ticks by a duration and then calls the function to handle the response. The -/// new timestamp is returned so it can be called again. 
-fn publish_tick_response( - session: Arc>, - ss: &SubscriptionService, - address_space: Arc>, - now: DateTimeUtc, - duration: chrono::Duration, - handler: T, -) -> DateTimeUtc -where - T: FnOnce(PublishResponse), -{ - publish_request(&now, session.clone(), address_space.clone(), ss); - let now = now.add(duration); - { - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); - assert_eq!( - session.subscriptions_mut().publish_response_queue().len(), - 1 - ); - } - let response = publish_response(session.clone()); - handler(response); - now -} - -fn populate_monitored_item(server_state: &ServerState, discard_oldest: bool) -> MonitoredItem { - let client_handle = 999; - let mut monitored_item = MonitoredItem::new( - &chrono::Utc::now(), - 1, - TimestampsToReturn::Both, - server_state, - &make_create_request_data_change_filter(-1f64, 5), - ) - .unwrap(); - monitored_item.set_discard_oldest(discard_oldest); - for i in 0..5 { - monitored_item.enqueue_notification_message(MonitoredItemNotification { - client_handle, - value: DataValue::new_now(i as i32), - }); - assert!(!monitored_item.queue_overflow()); - } - - monitored_item.enqueue_notification_message(MonitoredItemNotification { - client_handle, - value: DataValue::new_now(10 as i32), - }); - assert!(monitored_item.queue_overflow()); - monitored_item -} - -fn assert_first_notification_is_i32(monitored_item: &mut MonitoredItem, value: i32) { - let notification = monitored_item.oldest_notification_message().unwrap(); - if let Notification::MonitoredItemNotification(notification) = notification { - assert_eq!(notification.value.value.unwrap(), Variant::Int32(value)); - } else { - panic!(); - } -} - -#[test] -fn data_change_filter_test() { - let mut filter = DataChangeFilter { - trigger: DataChangeTrigger::Status, - deadband_type: DeadbandType::None as u32, - deadband_value: 0f64, - }; - - 
let mut v1 = DataValue { - value: None, - status: None, - source_timestamp: None, - source_picoseconds: None, - server_timestamp: None, - server_picoseconds: None, - }; - - let mut v2 = DataValue { - value: None, - status: None, - source_timestamp: None, - source_picoseconds: None, - server_timestamp: None, - server_picoseconds: None, - }; - - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Change v1 status - v1.status = Some(StatusCode::Good); - assert_eq!(filter.compare(&v1, &v2, None), false); - - // Change v2 status - v2.status = Some(StatusCode::Good); - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Change value - but since trigger is status, this should not matter - v1.value = Some(Variant::Boolean(true)); - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Change trigger to status-value and change should matter - filter.trigger = DataChangeTrigger::StatusValue; - assert_eq!(filter.compare(&v1, &v2, None), false); - - // Now values are the same - v2.value = Some(Variant::Boolean(true)); - assert_eq!(filter.compare(&v1, &v2, None), true); - - // And for status-value-timestamp - filter.trigger = DataChangeTrigger::StatusValueTimestamp; - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Change timestamps to differ - let now = DateTime::now(); - v1.server_timestamp = Some(now.clone()); - assert_eq!(filter.compare(&v1, &v2, None), false); -} - -#[test] -fn data_change_deadband_abs_test() { - let filter = DataChangeFilter { - trigger: DataChangeTrigger::StatusValue, - // Abs compare - deadband_type: DeadbandType::Absolute as u32, - deadband_value: 1f64, - }; - - let v1 = DataValue { - value: Some(Variant::Double(10f64)), - status: None, - source_timestamp: None, - source_picoseconds: None, - server_timestamp: None, - server_picoseconds: None, - }; - - let mut v2 = DataValue { - value: Some(Variant::Double(10f64)), - status: None, - source_timestamp: None, - source_picoseconds: None, - server_timestamp: None, - server_picoseconds: 
None, - }; - - // Values are the same so deadband should not matter - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Adjust by less than deadband - v2.value = Some(Variant::Double(10.9f64)); - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Adjust by equal deadband - v2.value = Some(Variant::Double(11f64)); - assert_eq!(filter.compare(&v1, &v2, None), true); - - // Adjust by equal deadband plus a little bit - v2.value = Some(Variant::Double(11.00001f64)); - assert_eq!(filter.compare(&v1, &v2, None), false); -} - -// Straight tests of abs function -#[test] -fn deadband_abs() { - assert_eq!(DataChangeFilter::abs_compare(100f64, 100f64, 0f64), true); - assert_eq!(DataChangeFilter::abs_compare(100f64, 100f64, 1f64), true); - assert_eq!(DataChangeFilter::abs_compare(100f64, 101f64, 1f64), true); - assert_eq!(DataChangeFilter::abs_compare(101f64, 100f64, 1f64), true); - assert_eq!( - DataChangeFilter::abs_compare(101.001f64, 100f64, 1f64), - false - ); - assert_eq!( - DataChangeFilter::abs_compare(100f64, 101.001f64, 1f64), - false - ); -} - -// Straight tests of pct function -#[test] -fn deadband_pct() { - assert_eq!( - DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 0f64), - false - ); - assert_eq!( - DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 1f64), - true - ); - assert_eq!( - DataChangeFilter::pct_compare(100f64, 101.0001f64, 0f64, 100f64, 1f64), - false - ); - assert_eq!( - DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1f64), - false - ); - assert_eq!( - DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1.0002f64), - true - ); -} - -#[test] -fn monitored_item_data_change_filter() { - // create an address space - do_subscription_service_test( - |server_state, - _session, - _address_space, - _ss: SubscriptionService, - _mis: MonitoredItemService| { - let mut address_space = make_address_space(); - let server_state = trace_read_lock!(server_state); - - // Create request should monitor 
attribute of variable, e.g. value - // Sample interval is negative so it will always test on repeated calls - let mut monitored_item = MonitoredItem::new( - &chrono::Utc::now(), - 1, - TimestampsToReturn::Both, - &server_state, - &make_create_request_data_change_filter(-1f64, 5), - ) - .unwrap(); - - let now = Utc::now(); - - assert_eq!(monitored_item.notification_queue().len(), 0); - - // Expect first call to always succeed - assert_eq!( - monitored_item.tick(&now, &address_space, true, false), - TickResult::ReportValueChanged - ); - - // Expect one item in its queue - assert_eq!(monitored_item.notification_queue().len(), 1); - - // Expect false on next tick, with the same value because no subscription timer has fired - assert_eq!( - monitored_item.tick(&now, &address_space, false, false), - TickResult::NoChange - ); - assert_eq!(monitored_item.notification_queue().len(), 1); - - // Expect false because publish timer elapses but value has not changed changed - assert_eq!( - monitored_item.tick(&now, &address_space, false, false), - TickResult::NoChange - ); - assert_eq!(monitored_item.notification_queue().len(), 1); - - // adjust variable value - if let &mut NodeType::Variable(ref mut node) = - address_space.find_node_mut(&test_var_node_id()).unwrap() - { - let _ = node - .set_value(NumericRange::None, Variant::UInt32(1)) - .unwrap(); - } else { - panic!("Expected a variable, didn't get one!!"); - } - - // Expect change but only when subscription timer elapsed - assert_eq!( - monitored_item.tick(&now, &address_space, false, false), - TickResult::NoChange - ); - assert_eq!( - monitored_item.tick(&now, &address_space, true, false), - TickResult::ReportValueChanged - ); - assert_eq!(monitored_item.notification_queue().len(), 2); - }, - ) -} - -#[test] -fn monitored_item_event_filter() { - // create an address space - do_subscription_service_test( - |server_state, - _session, - _address_space, - _ss: SubscriptionService, - _mis: MonitoredItemService| { - let mut 
address_space = make_address_space(); - let server_state = trace_read_lock!(server_state); - - let ns = address_space.register_namespace("urn:test").unwrap(); - - // Create request should monitor attribute of variable, e.g. value - // Sample interval is negative so it will always test on repeated calls - let mut monitored_item = MonitoredItem::new( - &chrono::Utc::now(), - 1, - TimestampsToReturn::Both, - &server_state, - &make_create_request_event_filter(-1f64, 5), - ) - .unwrap(); - - let mut now = Utc::now(); - - // Verify tick does nothing - assert_eq!( - monitored_item.tick(&now, &address_space, false, false), - TickResult::NoChange - ); - - now = now + chrono::Duration::milliseconds(100); - - // Raise an event - let event_id = NodeId::new(ns, "Event1"); - let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new( - &event_id, - event_type_id, - "Event1", - "", - NodeId::objects_folder_id(), - DateTime::from(now), - ) - .source_node(test_object_node_id()); - assert!(event.raise(&mut address_space).is_ok()); - - // Verify that event comes back - assert_eq!( - monitored_item.tick(&now, &address_space, true, false), - TickResult::ReportValueChanged - ); - - // Look at monitored item queue - assert_eq!(monitored_item.notification_queue().len(), 1); - let event = match monitored_item.oldest_notification_message().unwrap() { - Notification::Event(event) => event, - _ => panic!(), - }; - - // Verify EventFieldList - assert_eq!(event.client_handle, 999); - let mut event_fields = event.event_fields.unwrap(); - assert_eq!(event_fields.len(), 2); - - // EventId should be a ByteString, contents of which should be 16 bytes - let event_id = event_fields.remove(0); - match event_id { - Variant::ByteString(value) => assert_eq!(value.value.unwrap().len(), 16), - _ => panic!(), - } - - // Source node should point to the originating object - let event_source_node = event_fields.remove(0); - match event_source_node { - Variant::NodeId(source_node) => 
assert_eq!(*source_node, test_object_node_id()), - _ => panic!(), - } - - // Tick again (nothing expected) - now = now + chrono::Duration::milliseconds(100); - assert_eq!( - monitored_item.tick(&now, &address_space, false, false), - TickResult::NoChange - ); - - // Raise an event on another object, expect nothing in the tick about it - let event_id = NodeId::new(ns, "Event2"); - let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new( - &event_id, - event_type_id, - "Event2", - "", - NodeId::objects_folder_id(), - DateTime::from(now), - ) - .source_node(ObjectId::Server); - assert!(event.raise(&mut address_space).is_ok()); - now = now + chrono::Duration::milliseconds(100); - assert_eq!( - monitored_item.tick(&now, &address_space, false, false), - TickResult::NoChange - ); - }, - ); -} - -/// Test to ensure create monitored items returns an error for an unknown node id -#[test] -fn unknown_node_id() { - do_subscription_service_test( - |server_state, - session, - address_space, - ss: SubscriptionService, - mis: MonitoredItemService| { - // Create subscription - let subscription_id = { - let request = create_subscription_request(0, 0); - let response: CreateSubscriptionResponse = supported_message_as!( - ss.create_subscription(server_state.clone(), session.clone(), &request), - CreateSubscriptionResponse - ); - response.subscription_id - }; - - let request = create_monitored_items_request( - subscription_id, - vec![ - NodeId::new(1, var_name(1)), - NodeId::new(99, "Doesn't exist"), - ], - ); - - let response: CreateMonitoredItemsResponse = supported_message_as!( - mis.create_monitored_items( - server_state.clone(), - session.clone(), - address_space.clone(), - &request - ), - CreateMonitoredItemsResponse - ); - let results = response.results.unwrap(); - assert_eq!(results.len(), 2); - assert_eq!( - results.get(0).as_ref().unwrap().status_code, - StatusCode::Good - ); - assert_eq!( - results.get(1).as_ref().unwrap().status_code, - 
StatusCode::BadNodeIdUnknown - ); - }, - ); -} - -#[test] -fn monitored_item_triggers() { - do_subscription_service_test( - |server_state, - session, - address_space, - ss: SubscriptionService, - mis: MonitoredItemService| { - // Create subscription - let subscription_id = { - let request = create_subscription_request(0, 0); - let response: CreateSubscriptionResponse = supported_message_as!( - ss.create_subscription(server_state.clone(), session.clone(), &request), - CreateSubscriptionResponse - ); - response.subscription_id - }; - - { - let mut session = trace_write_lock!(session); - session - .subscriptions_mut() - .get_mut(subscription_id) - .unwrap() - .set_state(SubscriptionState::Normal); - } - - let max_monitored_items: usize = 4; - - let triggering_node = NodeId::new(1, var_name(0)); - // create 4 monitored items - let request = create_monitored_items_request( - subscription_id, - vec![ - triggering_node.clone(), - NodeId::new(1, var_name(1)), - NodeId::new(1, var_name(2)), - NodeId::new(1, var_name(3)), - ], - ); - let response: CreateMonitoredItemsResponse = supported_message_as!( - mis.create_monitored_items( - server_state.clone(), - session.clone(), - address_space.clone(), - &request - ), - CreateMonitoredItemsResponse - ); - - // The first monitored item will be the triggering item, the other 3 will be triggered items - let monitored_item_ids: Vec = response - .results - .unwrap() - .iter() - .map(|mir| { - assert_eq!(mir.status_code, StatusCode::Good); - mir.monitored_item_id - }) - .collect(); - assert_eq!(monitored_item_ids.len(), max_monitored_items); - - let triggering_item_id = monitored_item_ids[0]; - let triggered_item_ids = &monitored_item_ids[1..]; - - // set 3 monitored items to be reporting, sampling, disabled respectively - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[0], - MonitoringMode::Reporting, - &mis, - ); - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[1], - 
MonitoringMode::Sampling, - &mis, - ); - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[2], - MonitoringMode::Disabled, - &mis, - ); - - // set 1 monitored item to trigger other 3 plus itself - let (add_results, remove_results) = set_triggering( - session.clone(), - subscription_id, - monitored_item_ids[0], - &[ - monitored_item_ids[0], - monitored_item_ids[1], - monitored_item_ids[2], - monitored_item_ids[3], - ], - &[], - &mis, - ); - - // expect all adds to succeed except the one to itself - assert!(remove_results.is_none()); - let add_results = add_results.unwrap(); - assert_eq!(add_results[0], StatusCode::BadMonitoredItemIdInvalid); - assert_eq!(add_results[1], StatusCode::Good); - assert_eq!(add_results[2], StatusCode::Good); - assert_eq!(add_results[3], StatusCode::Good); - - let now = Utc::now(); - - // publish on the monitored item - let now = publish_tick_response( - session.clone(), - &ss, - address_space.clone(), - now, - chrono::Duration::seconds(2), - |response| { - let (notifications, events) = response - .notification_message - .notifications(&DecodingOptions::test()) - .unwrap(); - assert_eq!(notifications.len(), 1); - assert!(events.is_empty()); - let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); - assert_eq!(monitored_items.len(), 3); - let client_handles: HashSet = monitored_items - .iter() - .map(|min| min.client_handle) - .collect(); - // expect a notification to be for triggering item - assert!(client_handles.contains(&0)); - // expect a notification to be for triggered[0] (reporting) because it's reporting - assert!(client_handles.contains(&1)); - // expect a notification to be for triggered[1] (sampling) - assert!(client_handles.contains(&2)); - }, - ); - - // do a publish on the monitored item, expect no notification because nothing has changed - let now = publish_tick_no_response( - session.clone(), - &ss, - address_space.clone(), - now, - chrono::Duration::seconds(2), - ); - - // 
set monitoring mode of all 3 to reporting. - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[0], - MonitoringMode::Reporting, - &mis, - ); - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[1], - MonitoringMode::Reporting, - &mis, - ); - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[2], - MonitoringMode::Reporting, - &mis, - ); - - // Change the triggering item's value - { - let mut address_space = trace_write_lock!(address_space); - let _ = address_space.set_variable_value( - triggering_node.clone(), - 1, - &DateTime::from(now.clone()), - &DateTime::from(now.clone()), - ); - } - - // In this case, the triggering item changes, but triggered items are all reporting so are ignored unless they themselves - // need to report. Only 3 will fire because it was disabled previously - let now = publish_tick_response( - session.clone(), - &ss, - address_space.clone(), - now, - chrono::Duration::seconds(2), - |response| { - let (notifications, events) = response - .notification_message - .notifications(&DecodingOptions::test()) - .unwrap(); - assert_eq!(notifications.len(), 1); - assert!(events.is_empty()); - let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); - let client_handles: HashSet = monitored_items - .iter() - .map(|min| min.client_handle) - .collect(); - assert_eq!(monitored_items.len(), 2); - assert!(client_handles.contains(&0)); - assert!(client_handles.contains(&3)); - }, - ); - - // revert to 3 items to be reporting, sampling, disabled - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[0], - MonitoringMode::Reporting, - &mis, - ); - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[1], - MonitoringMode::Sampling, - &mis, - ); - set_monitoring_mode( - session.clone(), - subscription_id, - triggered_item_ids[2], - MonitoringMode::Disabled, - &mis, - ); - - // change monitoring mode of 
triggering item to sampling and change value - set_monitoring_mode( - session.clone(), - subscription_id, - triggering_item_id, - MonitoringMode::Sampling, - &mis, - ); - { - let mut address_space = trace_write_lock!(address_space); - let _ = address_space.set_variable_value( - triggering_node.clone(), - 2, - &DateTime::from(now.clone()), - &DateTime::from(now.clone()), - ); - } - - // do a publish on the monitored item, - let now = publish_tick_response( - session.clone(), - &ss, - address_space.clone(), - now, - chrono::Duration::seconds(2), - |response| { - // expect only 1 data change corresponding to sampling triggered item - let (notifications, events) = response - .notification_message - .notifications(&DecodingOptions::test()) - .unwrap(); - assert_eq!(notifications.len(), 1); - assert!(events.is_empty()); - let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); - let client_handles: HashSet = monitored_items - .iter() - .map(|min| min.client_handle) - .collect(); - assert_eq!(monitored_items.len(), 1); - assert!(client_handles.contains(&2)); - }, - ); - - // change monitoring mode of triggering item to disable - set_monitoring_mode( - session.clone(), - subscription_id, - triggering_item_id, - MonitoringMode::Disabled, - &mis, - ); - { - let mut address_space = trace_write_lock!(address_space); - let _ = address_space.set_variable_value( - triggering_node.clone(), - 3, - &DateTime::from(now.clone()), - &DateTime::from(now.clone()), - ); - } - - // do a publish on the monitored item, expect 0 data changes - let _ = publish_tick_no_response( - session.clone(), - &ss, - address_space.clone(), - now, - chrono::Duration::seconds(2), - ); - }, - ); -} - -#[test] -fn monitored_item_queue_discard_oldest() { - // The purpose of this test is to monitor the discard oldest behaviour. 
Depending on true/false - // the oldest or newest item will be overwritten when the queue is full - - do_subscription_service_test( - |server_state, - _session, - _address_space, - _ss: SubscriptionService, - _mis: MonitoredItemService| { - let server_state = trace_read_lock!(server_state); - - // discard_oldest = true - { - let mut monitored_item = populate_monitored_item(&server_state, true); - assert_first_notification_is_i32(&mut monitored_item, 1); - assert_first_notification_is_i32(&mut monitored_item, 2); - assert_first_notification_is_i32(&mut monitored_item, 3); - assert_first_notification_is_i32(&mut monitored_item, 4); - assert_first_notification_is_i32(&mut monitored_item, 10); - } - - // discard_oldest = false - { - let mut monitored_item = populate_monitored_item(&server_state, false); - assert_first_notification_is_i32(&mut monitored_item, 0); - assert_first_notification_is_i32(&mut monitored_item, 1); - assert_first_notification_is_i32(&mut monitored_item, 2); - assert_first_notification_is_i32(&mut monitored_item, 3); - assert_first_notification_is_i32(&mut monitored_item, 10); - } - }, - ); -} diff --git a/lib/src/server/tests/services/node_management.rs b/lib/src/server/tests/services/node_management.rs deleted file mode 100644 index 23a8e9ba5..000000000 --- a/lib/src/server/tests/services/node_management.rs +++ /dev/null @@ -1,762 +0,0 @@ -use crate::server::services::node_management::NodeManagementService; -use crate::supported_message_as; -use crate::sync::*; -use crate::types::node_ids::{DataTypeId, MethodId, ObjectId, ObjectTypeId, ReferenceTypeId}; - -use super::*; - -/// A helper that sets up a subscription service test -fn do_node_management_service_test(can_modify_address_space: bool, f: T) -where - T: FnOnce( - Arc>, - Arc>, - Arc>, - NodeManagementService, - ), -{ - crate::console_logging::init(); - - let st = ServiceTest::new(); - - { - // Enable client side modification of address space - let mut session = 
trace_write_lock!(st.session); - session.set_can_modify_address_space(can_modify_address_space); - } - - let _ = add_many_vars_to_address_space(st.address_space.clone(), 10); - - f( - st.server_state.clone(), - st.session.clone(), - st.address_space.clone(), - NodeManagementService::new(), - ); -} - -// A helper that adds one node and tests that the result matches the expected status code -fn do_add_node_test_with_expected_error( - can_modify_address_space: bool, - item: AddNodesItem, - expected_status_code: StatusCode, -) { - do_node_management_service_test( - can_modify_address_space, - |server_state, session, address_space, nms| { - let response = nms.add_nodes( - server_state, - session, - address_space.clone(), - &AddNodesRequest { - request_header: RequestHeader::dummy(), - nodes_to_add: Some(vec![item]), - }, - ); - let response: AddNodesResponse = supported_message_as!(response, AddNodesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!( - format!("{}", results[0].status_code), - format!("{}", expected_status_code) - ); - if expected_status_code.is_good() { - assert_ne!(results[0].added_node_id, NodeId::null()); - let address_space = trace_read_lock!(address_space); - assert!(address_space.find_node(&results[0].added_node_id).is_some()); - } else { - assert_eq!(results[0].added_node_id, NodeId::null()); - } - }, - ); -} - -fn do_add_references_test( - can_modify_address_space: bool, - item: AddReferencesItem, - expected_status_code: StatusCode, -) { - do_node_management_service_test( - can_modify_address_space, - |server_state, session, address_space, nms| { - let response = nms.add_references( - server_state, - session, - address_space, - &AddReferencesRequest { - request_header: RequestHeader::dummy(), - references_to_add: Some(vec![item]), - }, - ); - let response: AddReferencesResponse = - supported_message_as!(response, AddReferencesResponse); - let results = response.results.unwrap(); - 
assert_eq!(results.len(), 1); - assert_eq!( - format!("{}", results[0]), - format!("{}", expected_status_code) - ); - if expected_status_code.is_good() { - // TODO expect the reference to exist - } - }, - ); -} - -fn do_delete_nodes_test( - can_modify_address_space: bool, - item: DeleteNodesItem, - expected_status_code: StatusCode, -) { - do_node_management_service_test( - can_modify_address_space, - |server_state, session, address_space, nms| { - let response = nms.delete_nodes( - server_state, - session, - address_space, - &DeleteNodesRequest { - request_header: RequestHeader::dummy(), - nodes_to_delete: Some(vec![item]), - }, - ); - let response: DeleteNodesResponse = - supported_message_as!(response, DeleteNodesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!( - format!("{}", results[0]), - format!("{}", expected_status_code) - ); - }, - ); -} - -fn do_delete_references_test( - can_modify_address_space: bool, - item: DeleteReferencesItem, - expected_status_code: StatusCode, -) { - do_node_management_service_test( - can_modify_address_space, - |server_state, session, address_space, nms| { - let response = nms.delete_references( - server_state, - session, - address_space, - &DeleteReferencesRequest { - request_header: RequestHeader::dummy(), - references_to_delete: Some(vec![item]), - }, - ); - let response: DeleteReferencesResponse = - supported_message_as!(response, DeleteReferencesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!( - format!("{}", results[0]), - format!("{}", expected_status_code) - ); - }, - ); -} - -fn object_attributes(display_name: T) -> ExtensionObject -where - T: Into, -{ - let specified_attributes = AttributesMask::DISPLAY_NAME - | AttributesMask::DESCRIPTION - | AttributesMask::WRITE_MASK - | AttributesMask::USER_WRITE_MASK - | AttributesMask::EVENT_NOTIFIER; - - ExtensionObject::from_encodable( - 
ObjectId::ObjectAttributes_Encoding_DefaultBinary, - &ObjectAttributes { - specified_attributes: specified_attributes.bits(), - display_name: display_name.into(), - description: LocalizedText::new("", "description"), - write_mask: 0, - user_write_mask: 0, - event_notifier: 0, - }, - ) -} - -fn variable_attributes(display_name: T) -> ExtensionObject -where - T: Into, -{ - let specified_attributes = AttributesMask::DISPLAY_NAME - | AttributesMask::ACCESS_LEVEL - | AttributesMask::USER_ACCESS_LEVEL - | AttributesMask::DATA_TYPE - | AttributesMask::HISTORIZING - | AttributesMask::VALUE - | AttributesMask::VALUE_RANK; - - ExtensionObject::from_encodable( - ObjectId::VariableAttributes_Encoding_DefaultBinary, - &VariableAttributes { - specified_attributes: specified_attributes.bits(), - display_name: display_name.into(), - description: LocalizedText::null(), - write_mask: 0, - user_write_mask: 0, - value: Variant::from(true), - data_type: DataTypeId::Boolean.into(), - value_rank: 1, - array_dimensions: None, - access_level: 1, - user_access_level: 2, - minimum_sampling_interval: 0.0, - historizing: false, - }, - ) -} - -fn method_attributes(display_name: T) -> ExtensionObject -where - T: Into, -{ - let specified_attributes = - AttributesMask::DISPLAY_NAME | AttributesMask::EXECUTABLE | AttributesMask::USER_EXECUTABLE; - ExtensionObject::from_encodable( - ObjectId::MethodAttributes_Encoding_DefaultBinary, - &MethodAttributes { - specified_attributes: specified_attributes.bits(), - display_name: display_name.into(), - description: LocalizedText::null(), - write_mask: 0, - user_write_mask: 0, - executable: true, - user_executable: true, - }, - ) -} - -#[test] -fn add_nodes_nothing_to_do() { - // Empty request - do_node_management_service_test( - true, - |server_state, session, address_space, nms: NodeManagementService| { - let response = nms.add_nodes( - server_state.clone(), - session.clone(), - address_space.clone(), - &AddNodesRequest { - request_header: 
RequestHeader::dummy(), - nodes_to_add: None, - }, - ); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - - let response = nms.add_nodes( - server_state.clone(), - session.clone(), - address_space.clone(), - &AddNodesRequest { - request_header: RequestHeader::dummy(), - nodes_to_add: Some(vec![]), - }, - ); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - }, - ); -} - -#[test] -fn add_nodes_reference_type_id_invalid() { - // Add a node with a null requested node id - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: NodeId::root_folder_id().into(), - reference_type_id: NodeId::null(), // !!! - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, - StatusCode::BadReferenceTypeIdInvalid, - ); -} - -#[test] -fn add_nodes_node_class_invalid() { - // Invalid class - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Unspecified, // !!! - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, - StatusCode::BadNodeClassInvalid, - ); -} - -#[test] -fn add_nodes_parent_node_id_invalid() { - // Add a node with an invalid parent id - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: NodeId::new(100, "blahblah").into(), // !!! 
- reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, - StatusCode::BadParentNodeIdInvalid, - ); -} - -#[test] -fn add_nodes_type_definition_invalid() { - // Add a node with a missing type definition, when one is required - // Object - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ExpandedNodeId::null(), // !!! - }, - StatusCode::BadTypeDefinitionInvalid, - ); - - // Add a node with a missing type definition, when one is required - // Variable - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Variable, - node_attributes: variable_attributes("foo"), - type_definition: ExpandedNodeId::null(), // !!! - }, - StatusCode::BadTypeDefinitionInvalid, - ); - - // Add a node with a type definition when one is not required, e.g.. for Method - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Method, - node_attributes: method_attributes("foo"), - type_definition: ObjectTypeId::AddressSpaceFileType.into(), // !!! 
- }, - StatusCode::BadTypeDefinitionInvalid, - ); - - // Add a node with an unrecognized type, something that is not a type at all - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Variable, - node_attributes: variable_attributes("foo"), - type_definition: MethodId::ProgramStateMachineType_Start.into(), // !!! - }, - StatusCode::BadTypeDefinitionInvalid, - ); -} - -#[test] -fn add_nodes_node_id_exists() { - // Add a node where node id already exists - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ObjectId::ObjectsFolder.into(), // !!! - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Variable, - node_attributes: variable_attributes("foo"), - type_definition: ExpandedNodeId::null(), - }, - StatusCode::BadNodeIdExists, - ); -} - -#[test] -fn add_nodes_mismatching_class_and_attributes_exists() { - // Add a node where node class does not match the supplied node attributes - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: variable_attributes("foo"), // !!! 
- type_definition: ObjectTypeId::AddressSpaceFileType.into(), - }, - StatusCode::BadNodeAttributesInvalid, - ); -} - -#[test] -fn add_nodes_browse_name_duplicated() { - // Add a node which is valid - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: NodeId::root_folder_id().into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("Objects"), // !!! - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, - StatusCode::BadBrowseNameDuplicated, - ); -} - -#[test] -fn add_nodes_valid() { - // Add a node which is valid - do_add_node_test_with_expected_error( - true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, - StatusCode::Good, - ); -} - -#[test] -fn add_nodes_user_access_denied() { - // Add a node without permission - do_add_node_test_with_expected_error( - false, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, - StatusCode::BadUserAccessDenied, - ); -} - -#[test] -fn add_references() { - // Add a reference where the node id is invalid - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasProperty.into(), - is_forward: true, - target_server_uri: UAString::null(), - 
target_node_id: var_node_id(1).into(), - target_node_class: NodeClass::Variable, - }, - StatusCode::Good, - ); -} - -#[test] -fn add_references_source_node_id_invalid() { - // Add a reference where the node id is invalid - do_add_references_test( - true, - AddReferencesItem { - source_node_id: NodeId::null(), // !!! - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ServerConfiguration.into(), - target_node_class: NodeClass::Object, - }, - StatusCode::BadSourceNodeIdInvalid, - ); -} - -#[test] -fn add_references_target_node_id_invalid() { - // Add a reference where the node id is invalid - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ExpandedNodeId::null(), // !!! - target_node_class: NodeClass::Object, - }, - StatusCode::BadTargetNodeIdInvalid, - ); -} - -#[test] -fn add_references_server_uri_invalid() { - // Add a reference where the server uri is invalid - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::from("urn:foo"), // !!! - target_node_id: ObjectId::ServerConfiguration.into(), - target_node_class: NodeClass::Object, - }, - StatusCode::BadServerUriInvalid, - ); -} - -#[test] -fn add_references_reference_type_id_invalid() { - // Add a reference where the reference type id is invalid - - // Null node - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: NodeId::null(), // !!! 
- is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Object, - }, - StatusCode::BadReferenceTypeIdInvalid, - ); - - // Not a reference type id node - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: MethodId::AddressSpaceFileType_Write.into(), // !!! - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Object, - }, - StatusCode::BadReferenceTypeIdInvalid, - ); -} - -#[test] -fn add_references_reference_local_only() { - // Add a reference where the reference is remote - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ExpandedNodeId { - server_index: 1, - namespace_uri: UAString::null(), - node_id: ObjectId::ServerConfiguration.into(), - }, // !!! 
- target_node_class: NodeClass::Object, - }, - StatusCode::BadReferenceLocalOnly, - ); -} - -#[test] -fn add_references_duplicate_reference_not_allowed() { - // Add a reference that is a duplicate - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Object, - }, - StatusCode::BadDuplicateReferenceNotAllowed, - ); -} - -#[test] -fn add_references_node_class_invalid() { - // Add a reference where the node class is invalid - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Unspecified, // !!! - }, - StatusCode::BadNodeClassInvalid, - ); - - // This supplies a target class which is different type from the target node's class - do_add_references_test( - true, - AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Variable, // !!! 
- }, - StatusCode::BadNodeClassInvalid, - ); -} - -#[test] -fn delete_nodes() { - // delete a node by node id - do_delete_nodes_test( - true, - DeleteNodesItem { - node_id: var_node_id(1), - delete_target_references: false, - }, - StatusCode::Good, - ); - do_delete_nodes_test( - true, - DeleteNodesItem { - node_id: var_node_id(2), - delete_target_references: true, - }, - StatusCode::Good, - ); -} - -#[test] -fn delete_nodes_node_id_unknown() { - // delete a node by node id when it does not exist - do_delete_nodes_test( - true, - DeleteNodesItem { - node_id: var_node_id(2000), // !!! - delete_target_references: false, - }, - StatusCode::BadNodeIdUnknown, - ); -} - -#[test] -fn delete_nodes_user_access_denied() { - // delete a node by node id without permission - do_delete_nodes_test( - false, - DeleteNodesItem { - node_id: var_node_id(1), - delete_target_references: false, - }, - StatusCode::BadUserAccessDenied, - ); -} - -#[test] -fn delete_references() { - do_delete_references_test( - true, - DeleteReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ObjectId::ObjectsFolder.into(), - delete_bidirectional: false, - }, - StatusCode::Good, - ); -} - -#[test] -fn delete_references_user_access_denied() { - do_delete_references_test( - false, - DeleteReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ObjectId::ObjectsFolder.into(), - delete_bidirectional: false, - }, - StatusCode::BadUserAccessDenied, - ); -} - -#[test] -fn delete_references_source_node_id_invalid() { - do_delete_references_test( - true, - DeleteReferencesItem { - source_node_id: NodeId::null(), // !!! 
- reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ObjectId::ObjectsFolder.into(), - delete_bidirectional: false, - }, - StatusCode::BadSourceNodeIdInvalid, - ); -} - -#[test] -fn delete_references_target_node_id_invalid() { - do_delete_references_test( - true, - DeleteReferencesItem { - source_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ExpandedNodeId::null(), // !!! - delete_bidirectional: false, - }, - StatusCode::BadTargetNodeIdInvalid, - ); -} diff --git a/lib/src/server/tests/services/session.rs b/lib/src/server/tests/services/session.rs deleted file mode 100644 index ae08ae6be..000000000 --- a/lib/src/server/tests/services/session.rs +++ /dev/null @@ -1,322 +0,0 @@ -use crate::crypto::{random, user_identity::make_user_name_identity_token, SecurityPolicy}; -use crate::types::{ActivateSessionRequest, RequestHeader, SignatureData}; - -use crate::server::{ - builder::ServerBuilder, - identity_token::{ - POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, POLICY_ID_USER_PASS_RSA_OAEP, - }, - services::session::SessionService, - state::ServerState, - tests::*, -}; - -use super::*; - -fn dummy_activate_session_request() -> ActivateSessionRequest { - ActivateSessionRequest { - request_header: RequestHeader::dummy(), - client_signature: SignatureData { - algorithm: UAString::null(), - signature: ByteString::null(), - }, - client_software_certificates: None, - locale_ids: None, - user_identity_token: ExtensionObject::null(), - user_token_signature: SignatureData { - algorithm: UAString::null(), - signature: ByteString::null(), - }, - } -} - -/// A helper that sets up a subscription service test -fn do_session_service_test(pki_dir: Option<&str>, f: T) -where - T: FnOnce(Arc>, SessionService), -{ - crate::console_logging::init(); - - let mut server_builder = ServerBuilder::new_sample(); - if let Some(pki_dir) = pki_dir { - 
server_builder = server_builder.pki_dir(pki_dir); - }; - - let st = ServiceTest::new_with_server(server_builder); - f(st.server_state.clone(), SessionService::new()); -} - -#[test] -fn anonymous_user_token() { - do_session_service_test(None, |server_state, _session_service| { - let server_state = server_state.read(); - - // Makes an anonymous token and sticks it into an extension object - let token = AnonymousIdentityToken { - policy_id: UAString::from("anonymous"), - }; - let token = ExtensionObject::from_encodable( - ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, - &token, - ); - - let server_nonce = random::byte_string(20); - - let request = dummy_activate_session_request(); - - let result = server_state.authenticate_endpoint( - &request, - "opc.tcp://localhost:4855/", - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - trace!("result = {:?}", result); - assert!(result.is_ok()); - - let result = server_state.authenticate_endpoint( - &request, - "opc.tcp://localhost:4855/x", - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - trace!("result = {:?}", result); - assert_eq!(result.unwrap_err(), StatusCode::BadTcpEndpointUrlInvalid); - - let result = server_state.authenticate_endpoint( - &request, - "opc.tcp://localhost:4855/noaccess", - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - trace!("result = {:?}", result); - assert_eq!(result.unwrap_err(), StatusCode::BadIdentityTokenRejected); - }); -} - -fn make_encrypted_user_name_identity_token( - policy_id: &str, - security_policy: SecurityPolicy, - server_nonce: &ByteString, - server_cert: &Option, - user: &str, - pass: &str, -) -> ExtensionObject { - let user_token_policy = crate::types::service_types::UserTokenPolicy { - policy_id: UAString::from(policy_id), - token_type: UserTokenType::UserName, - issued_token_type: UAString::null(), - issuer_endpoint_url: UAString::null(), - security_policy_uri: 
UAString::null(), - }; - let token = make_user_name_identity_token( - security_policy, - &user_token_policy, - server_nonce.as_ref(), - server_cert, - user, - pass, - ) - .unwrap(); - ExtensionObject::from_encodable( - ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, - &token, - ) -} - -fn make_unencrypted_user_name_identity_token(user: &str, pass: &str) -> ExtensionObject { - let token = UserNameIdentityToken { - policy_id: UAString::from(POLICY_ID_USER_PASS_NONE), - user_name: UAString::from(user), - password: ByteString::from(pass.as_bytes()), - encryption_algorithm: UAString::null(), - }; - ExtensionObject::from_encodable( - ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, - &token, - ) -} - -#[test] -fn user_name_pass_token() { - do_session_service_test( - Some("./pki_user_name_pass_token"), - |server_state, _session_service| { - let server_nonce = random::byte_string(20); - - let server_state = server_state.read(); - let server_cert = server_state.server_certificate.clone(); - assert!(server_cert.is_some()); - - const ENDPOINT_URL: &str = "opc.tcp://localhost:4855/"; - - let request = dummy_activate_session_request(); - - // Test that a good user authenticates in unencrypt and encrypted policies - let token = make_unencrypted_user_name_identity_token("sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - assert!(result.is_ok()); - - let token = make_encrypted_user_name_identity_token( - POLICY_ID_USER_PASS_RSA_15, - SecurityPolicy::Basic128Rsa15, - &server_nonce, - &server_cert, - "sample1", - "sample1pwd", - ); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::Basic128Rsa15, - MessageSecurityMode::SignAndEncrypt, - &token, - &server_nonce, - ); - assert!(result.is_ok()); - - let token = make_encrypted_user_name_identity_token( - POLICY_ID_USER_PASS_RSA_OAEP, - 
SecurityPolicy::Basic256, - &server_nonce, - &server_cert, - "sample1", - "sample1pwd", - ); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::Basic256, - MessageSecurityMode::SignAndEncrypt, - &token, - &server_nonce, - ); - assert!(result.is_ok()); - - let token = make_encrypted_user_name_identity_token( - POLICY_ID_USER_PASS_RSA_OAEP, - SecurityPolicy::Basic256Sha256, - &server_nonce, - &server_cert, - "sample1", - "sample1pwd", - ); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - &token, - &server_nonce, - ); - assert!(result.is_ok()); - - // Invalid tests - - // Mismatch between security policy and encryption - let token = make_encrypted_user_name_identity_token( - POLICY_ID_USER_PASS_RSA_15, - SecurityPolicy::Basic256Sha256, - &server_nonce, - &server_cert, - "sample1", - "sample1pwd", - ); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - &token, - &server_nonce, - ); - assert_eq!(result.unwrap_err(), StatusCode::BadIdentityTokenInvalid); - - // No encryption policy when encryption is required - let token = make_encrypted_user_name_identity_token( - POLICY_ID_USER_PASS_NONE, - SecurityPolicy::Basic128Rsa15, - &server_nonce, - &server_cert, - "sample1", - "sample1pwd", - ); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - &token, - &server_nonce, - ); - assert_eq!(result.unwrap_err(), StatusCode::BadIdentityTokenInvalid); - - // Invalid user - let token = make_unencrypted_user_name_identity_token("samplex", "sample1pwd"); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - 
assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); - - // Invalid password - let token = make_unencrypted_user_name_identity_token("sample1", "sample"); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); - - // Empty user - let token = make_unencrypted_user_name_identity_token("", "sample1pwd"); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::None, - MessageSecurityMode::None, - &token, - &server_nonce, - ); - assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); - - // Invalid password (encrypted) - let token = make_encrypted_user_name_identity_token( - POLICY_ID_USER_PASS_RSA_OAEP, - SecurityPolicy::Basic128Rsa15, - &server_nonce, - &server_cert, - "sample1", - "samplexx1", - ); - let result = server_state.authenticate_endpoint( - &request, - ENDPOINT_URL, - SecurityPolicy::Basic256Sha256, - MessageSecurityMode::SignAndEncrypt, - &token, - &server_nonce, - ); - assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); - }, - ); -} diff --git a/lib/src/server/tests/services/subscription.rs b/lib/src/server/tests/services/subscription.rs deleted file mode 100644 index 78097b1fe..000000000 --- a/lib/src/server/tests/services/subscription.rs +++ /dev/null @@ -1,461 +0,0 @@ -use std::ops::Add; - -use chrono::Utc; - -use crate::server::{ - prelude::*, - services::{monitored_item::MonitoredItemService, subscription::SubscriptionService}, - state::ServerState, - subscriptions::subscription::*, -}; -use crate::supported_message_as; -use crate::sync::*; - -use super::*; - -fn create_subscription( - server_state: Arc>, - session: Arc>, - ss: &SubscriptionService, -) -> u32 { - let request = create_subscription_request(0, 0); - debug!("{:#?}", request); - let response: CreateSubscriptionResponse = 
supported_message_as!( - ss.create_subscription(server_state, session, &request), - CreateSubscriptionResponse - ); - debug!("{:#?}", response); - response.subscription_id -} - -fn create_monitored_item( - subscription_id: u32, - node_to_monitor: T, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - mis: &MonitoredItemService, -) where - T: Into, -{ - // Create a monitored item - let request = create_monitored_items_request(subscription_id, vec![node_to_monitor]); - debug!("CreateMonitoredItemsRequest {:#?}", request); - let response: CreateMonitoredItemsResponse = supported_message_as!( - mis.create_monitored_items(server_state, session, address_space, &request), - CreateMonitoredItemsResponse - ); - debug!("CreateMonitoredItemsResponse {:#?}", response); - // let result = response.results.unwrap()[0].monitored_item_id; -} - -fn publish_request( - subscription_acknowledgements: Option>, -) -> PublishRequest { - let request = PublishRequest { - request_header: RequestHeader::dummy(), - subscription_acknowledgements, - }; - debug!("PublishRequest {:#?}", request); - request -} - -fn republish_request(subscription_id: u32, retransmit_sequence_number: u32) -> RepublishRequest { - let request = RepublishRequest { - request_header: RequestHeader::dummy(), - subscription_id, - retransmit_sequence_number, - }; - debug!("RepublishRequest {:#?}", request); - request -} - -#[test] -fn create_modify_destroy_subscription() { - do_subscription_service_test(|_server_state, _session, _, _ss, _| { - // TODO Create a subscription, modify it, destroy it - //unimplemented!(); - }) -} - -/// Creates a subscription with the specified keep alive and lifetime values and compares -/// the revised values to the expected values. 
-fn keepalive_test( - keep_alive: u32, - lifetime: u32, - expected_keep_alive: u32, - expected_lifetime: u32, -) { - do_subscription_service_test(|server_state, session, _, ss, _| { - // Create subscription - let request = create_subscription_request(keep_alive, lifetime); - let response: CreateSubscriptionResponse = supported_message_as!( - ss.create_subscription(server_state, session, &request), - CreateSubscriptionResponse - ); - debug!("{:#?}", response); - assert_eq!(response.revised_lifetime_count, expected_lifetime); - assert_eq!(response.revised_max_keep_alive_count, expected_keep_alive); - assert!(response.revised_lifetime_count >= 3 * response.revised_max_keep_alive_count); - }) -} - -#[test] -fn test_revised_keep_alive_lifetime_counts() { - // Test that the keep alive and lifetime counts are correctly revised from their inputs - use crate::server::constants::{DEFAULT_KEEP_ALIVE_COUNT, MAX_KEEP_ALIVE_COUNT}; - const MAX_LIFETIME_COUNT: u32 = 3 * MAX_KEEP_ALIVE_COUNT; - const DEFAULT_LIFETIME_COUNT: u32 = 3 * DEFAULT_KEEP_ALIVE_COUNT; - - // Expect defaults to hold true - keepalive_test(0, 0, DEFAULT_KEEP_ALIVE_COUNT, DEFAULT_LIFETIME_COUNT); - keepalive_test( - 0, - (DEFAULT_KEEP_ALIVE_COUNT * 3) - 1, - DEFAULT_KEEP_ALIVE_COUNT, - DEFAULT_LIFETIME_COUNT, - ); - - // Expect lifetime to be 3 * keep alive - keepalive_test(1, 3, 1, 3); - keepalive_test(1, 4, 1, 4); - keepalive_test(1, 2, 1, 3); - keepalive_test( - DEFAULT_KEEP_ALIVE_COUNT, - 2, - DEFAULT_KEEP_ALIVE_COUNT, - DEFAULT_LIFETIME_COUNT, - ); - - // Expect max values to be honoured - keepalive_test( - MAX_KEEP_ALIVE_COUNT, - 0, - MAX_KEEP_ALIVE_COUNT, - MAX_LIFETIME_COUNT, - ); - keepalive_test( - MAX_KEEP_ALIVE_COUNT + 1, - 0, - MAX_KEEP_ALIVE_COUNT, - MAX_LIFETIME_COUNT, - ); -} - -#[test] -fn publish_with_no_subscriptions() { - do_subscription_service_test(|_, session, address_space, ss, _| { - let request = publish_request(None); - // Publish and expect a service fault BadNoSubscription - let 
request_id = 1001; - let response = ss - .async_publish(&Utc::now(), session, address_space, request_id, &request) - .unwrap(); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNoSubscription - ); - }) -} - -#[test] -fn publish_response_subscription() { - do_subscription_service_test(|server_state, session, address_space, ss, mis| { - // Create subscription - let subscription_id = create_subscription(server_state.clone(), session.clone(), &ss); - - // Create a monitored item - create_monitored_item( - subscription_id, - VariableId::Server_ServerStatus_StartTime, - server_state.clone(), - session.clone(), - address_space.clone(), - &mis, - ); - - let now = Utc::now(); - - // Put the subscription into normal state - { - let mut session = trace_write_lock!(session); - session - .subscriptions_mut() - .get_mut(subscription_id) - .unwrap() - .set_state(SubscriptionState::Normal); - } - - // Send a publish and expect a publish response containing the subscription - let notification_message = { - let request_id = 1001; - let request = publish_request(None); - - // Tick subscriptions to trigger a change - let _ = ss.async_publish( - &now, - session.clone(), - address_space.clone(), - request_id, - &request, - ); - let now = now.add(chrono::Duration::seconds(2)); - - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); - - // Ensure publish request was processed into a publish response - assert_eq!(session.subscriptions_mut().publish_request_queue().len(), 0); - assert_eq!( - session.subscriptions_mut().publish_response_queue().len(), - 1 - ); - - // Get the response from the queue - let response = session - .subscriptions_mut() - .publish_response_queue() - .pop_back() - .unwrap() - .response; - let response: PublishResponse = 
supported_message_as!(response, PublishResponse); - debug!("PublishResponse {:#?}", response); - - // We expect the response to contain a non-empty notification - assert_eq!(response.more_notifications, false); - assert_eq!(response.subscription_id, subscription_id); - assert!(response.available_sequence_numbers.is_none()); - - response.notification_message - }; - assert_eq!(notification_message.sequence_number, 1); - assert!(notification_message.notification_data.is_some()); - - // We expect to have one notification - let notification_data = notification_message.notification_data.as_ref().unwrap(); - assert_eq!(notification_data.len(), 1); - - // We expect the notification to contain one data change notification referring to - // the monitored item. - let decoding_options = DecodingOptions::test(); - let data_change = notification_data[0] - .decode_inner::(&decoding_options) - .unwrap(); - assert!(data_change.monitored_items.is_some()); - let monitored_items = data_change.monitored_items.unwrap(); - assert_eq!(monitored_items.len(), 1); - - // We expect the notification to be about handle 1 - let monitored_item_notification = &monitored_items[0]; - assert_eq!(monitored_item_notification.client_handle, 0); - - // We expect the queue to be empty, because we got an immediate response - let mut session = trace_write_lock!(session); - assert!(session - .subscriptions_mut() - .publish_response_queue() - .is_empty()); - }) -} - -#[test] -fn publish_keep_alive() { - do_subscription_service_test(|server_state, session, address_space, ss, mis| { - // Create subscription - let subscription_id = create_subscription(server_state.clone(), session.clone(), &ss); - - // Create a monitored item - { - let request = - create_monitored_items_request(subscription_id, vec![(1, "v1"), (1, "v1")]); - debug!("CreateMonitoredItemsRequest {:#?}", request); - let response: CreateMonitoredItemsResponse = supported_message_as!( - mis.create_monitored_items( - server_state.clone(), - 
session.clone(), - address_space.clone(), - &request - ), - CreateMonitoredItemsResponse - ); - debug!("CreateMonitoredItemsResponse {:#?}", response); - // let result = response.results.unwrap()[0].monitored_item_id; - } - - // Disable publishing to force a keep-alive - { - let mut session = trace_write_lock!(session); - let subscription = session - .subscriptions_mut() - .get_mut(subscription_id) - .unwrap(); - subscription.set_state(SubscriptionState::Normal); - subscription.set_publishing_enabled(false); - } - - // Send a publish and expect a keep-alive response - let notification_message = { - let request_id = 1001; - let request = publish_request(None); - let now = Utc::now(); - - // Don't expect a response right away - let response = ss.async_publish( - &now, - session.clone(), - address_space.clone(), - request_id, - &request, - ); - assert!(response.is_none()); - - let mut session = trace_write_lock!(session); - let address_space = trace_read_lock!(address_space); - - assert!(!session - .subscriptions_mut() - .publish_request_queue() - .is_empty()); - - // Tick subscriptions to trigger a change - let now = now.add(chrono::Duration::seconds(2)); - - let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); - - // Ensure publish request was processed into a publish response - assert_eq!(session.subscriptions_mut().publish_request_queue().len(), 0); - assert_eq!( - session.subscriptions_mut().publish_response_queue().len(), - 1 - ); - - // Get the response from the queue - let response = session - .subscriptions_mut() - .publish_response_queue() - .pop_back() - .unwrap() - .response; - let response: PublishResponse = supported_message_as!(response, PublishResponse); - debug!("PublishResponse {:#?}", response); - - // We expect the response to contain a non-empty notification - assert_eq!(response.more_notifications, false); - assert_eq!(response.subscription_id, subscription_id); - 
assert!(response.available_sequence_numbers.is_none()); - response.notification_message - }; - - // Expect the notification message to be a keep-alive - assert_eq!(notification_message.sequence_number, 1); - assert_eq!(notification_message.notification_data, None); - }) -} - -#[test] -fn multiple_publish_response_subscription() { - do_subscription_service_test(|server_state, session, address_space, ss, _mis| { - let _subscription_id = create_subscription(server_state, session.clone(), &ss); - - let now = Utc::now(); - let request_id = 1001; - - // Send a publish and expect nothing - let request = publish_request(None); - let response = ss.async_publish( - &now, - session.clone(), - address_space.clone(), - request_id, - &request, - ); - assert!(response.is_none()); - - // TODO Tick a change - // TODO Expect a publish response containing the subscription to be pushed - //unimplemented!(); - }) -} - -#[test] -fn acknowledge_unknown_sequence_nr() { - do_subscription_service_test(|server_state, session, address_space, ss, _mis| { - let subscription_id = create_subscription(server_state, session.clone(), &ss); - - let now = Utc::now(); - let request_id = 1001; - - // Acknowledge an unknown seqid, test the response - let ack = SubscriptionAcknowledgement { - subscription_id, - sequence_number: 10001, - }; - let request = publish_request(Some(vec![ack])); - let _response = ss.async_publish( - &now, - session.clone(), - address_space.clone(), - request_id, - &request, - ); - - // TODO - //unimplemented!(); - }) -} - -#[test] -fn republish() { - do_subscription_service_test(|server_state, session, _, ss, _| { - // Create subscription - let subscription_id = create_subscription(server_state.clone(), session.clone(), &ss); - - // Add a notification to the subscriptions retransmission queue - let sequence_number = { - let monitored_item_notifications = vec![MonitoredItemNotification { - client_handle: 1, - value: Variant::Empty.into(), - }]; - let notification = 
NotificationMessage::data_change( - 1, - DateTime::now(), - monitored_item_notifications, - vec![], - ); - let sequence_number = notification.sequence_number; - let mut session = trace_write_lock!(session); - session.subscriptions_mut().retransmission_queue().insert( - (subscription_id, notification.sequence_number), - notification, - ); - sequence_number - }; - - // try for a notification message known to exist - let request = republish_request(subscription_id, sequence_number); - let response = ss.republish(session.clone(), &request); - trace!("republish response {:#?}", response); - let response: RepublishResponse = supported_message_as!(response, RepublishResponse); - assert!(response.notification_message.sequence_number != 0); - - // try for a subscription id that does not exist, expect service fault - let request = republish_request(subscription_id + 1, sequence_number); - let response: ServiceFault = - supported_message_as!(ss.republish(session.clone(), &request), ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadSubscriptionIdInvalid - ); - - // try for a sequence nr that does not exist - let request = republish_request(subscription_id, sequence_number + 1); - let response: ServiceFault = - supported_message_as!(ss.republish(session.clone(), &request), ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadMessageNotAvailable - ); - }) -} diff --git a/lib/src/server/tests/services/view.rs b/lib/src/server/tests/services/view.rs deleted file mode 100644 index dc5575e55..000000000 --- a/lib/src/server/tests/services/view.rs +++ /dev/null @@ -1,1190 +0,0 @@ -use std::sync::Weak; - -use crate::server::services::view::ViewService; -use crate::supported_message_as; -use crate::sync::*; - -use super::*; - -// View service tests - -fn make_browse_request( - nodes: &[NodeId], - node_class_mask: NodeClassMask, - max_references_per_node: usize, - browse_direction: BrowseDirection, - 
reference_type: T, -) -> BrowseRequest -where - T: Into + Clone, -{ - let request_header = make_request_header(); - let nodes_to_browse = nodes - .iter() - .map(|n| BrowseDescription { - node_id: n.clone(), - browse_direction, - reference_type_id: reference_type.clone().into(), - include_subtypes: true, - node_class_mask: node_class_mask.bits(), - result_mask: BrowseDescriptionResultMask::all().bits() as u32, - }) - .collect(); - BrowseRequest { - request_header, - view: ViewDescription { - view_id: NodeId::null(), - timestamp: DateTime::null(), - view_version: 0, - }, - requested_max_references_per_node: max_references_per_node as u32, - nodes_to_browse: Some(nodes_to_browse), - } -} - -fn make_browse_next_request( - continuation_point: &ByteString, - release_continuation_points: bool, -) -> BrowseNextRequest { - let request_header = make_request_header(); - BrowseNextRequest { - request_header, - release_continuation_points, - continuation_points: if continuation_point.is_null() { - None - } else { - Some(vec![continuation_point.clone()]) - }, - } -} - -fn verify_references_to_many_vars( - references: &[ReferenceDescription], - expected_size: usize, - start_idx: usize, -) { - // Verify that the reference descriptions point at sequential vars - assert_eq!(references.len(), expected_size); - for (i, r) in references.iter().enumerate() { - assert_eq!(r.node_id.node_id, var_node_id(i + start_idx)); - } -} - -fn do_view_service_test(f: F) -where - F: FnOnce( - Arc>, - Arc>, - Arc>, - &ViewService, - ), -{ - crate::console_logging::init(); - let st = ServiceTest::new(); - f( - st.server_state.clone(), - st.session.clone(), - st.address_space.clone(), - &ViewService::new(), - ); -} - -fn do_browse( - vs: &ViewService, - server_state: Arc>, - session: Arc>, - address_space: Arc>, - nodes: &[NodeId], - max_references_per_node: usize, - browse_direction: BrowseDirection, -) -> BrowseResponse { - let request = make_browse_request( - nodes, - NodeClassMask::empty(), - 
max_references_per_node, - browse_direction, - ReferenceTypeId::Organizes, - ); - let response = vs.browse(server_state, session, address_space, &request); - supported_message_as!(response, BrowseResponse) -} - -fn do_browse_next( - vs: &ViewService, - session: Arc>, - address_space: Arc>, - continuation_point: &ByteString, - release_continuation_points: bool, -) -> BrowseNextResponse { - let request = make_browse_next_request(continuation_point, release_continuation_points); - let response = vs.browse_next(session, address_space, &request); - supported_message_as!(response, BrowseNextResponse) -} - -#[test] -fn browse() { - do_view_service_test(|server_state, session, address_space, vs| { - add_sample_vars_to_address_space(address_space.clone()); - - let nodes: Vec = vec![ObjectId::RootFolder.into()]; - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 1000, - BrowseDirection::Forward, - ); - assert!(response.results.is_some()); - - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - - assert!(results[0].references.is_some()); - let references = results[0].references.as_ref().unwrap(); - assert_eq!(references.len(), 3); - - // Expect to see refs to - // Objects/ - // Types/ - // Views/ - - let r1 = &references[0]; - assert_eq!(r1.browse_name, QualifiedName::new(0, "Objects")); - let r2 = &references[1]; - assert_eq!(r2.browse_name, QualifiedName::new(0, "Types")); - let r3 = &references[2]; - assert_eq!(r3.browse_name, QualifiedName::new(0, "Views")); - }); -} - -// Test the response of supplying an unsupported view to the browse request -#[test] -fn browse_non_null_view() { - do_view_service_test(|server_state, session, address_space, vs| { - let nodes: Vec = vec![ObjectId::RootFolder.into()]; - - // Expect a non-null view to be rejected - let mut request = make_browse_request( - &nodes, - NodeClassMask::empty(), - 1000, - BrowseDirection::Forward, - ReferenceTypeId::Organizes, - ); - 
request.view.view_id = NodeId::new(1, "FakeView"); - let response = vs.browse( - server_state.clone(), - session.clone(), - address_space.clone(), - &request, - ); - let response = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadViewIdUnknown - ); - - // Expect a non-0 timestamp to be rejected - request.view.view_id = NodeId::null(); - request.view.timestamp = DateTime::now(); - let response = vs.browse(server_state, session, address_space, &request); - let response = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadViewIdUnknown - ); - }); -} - -// This test applies a class mask to the browse so only nodes of types in the mask should come back -#[test] -fn browse_node_class_mask() { - do_view_service_test(|server_state, session, address_space, vs| { - add_sample_vars_to_address_space(address_space.clone()); - - let nodes: Vec = vec![ObjectId::Server.into()]; - let request = make_browse_request( - &nodes, - NodeClassMask::OBJECT, - 1000, - BrowseDirection::Forward, - ReferenceTypeId::HasComponent, - ); - - let response = vs.browse(server_state, session, address_space, &request); - let response = supported_message_as!(response, BrowseResponse); - assert!(response.results.is_some()); - - let results = response.results.unwrap(); - let references = results[0].references.as_ref().unwrap(); - - // There are 12 HasComponent values under Server altogether but only 7 are of Object type - assert_eq!(references.len(), 7); - references.iter().for_each(|r| { - assert_eq!(r.node_class, NodeClass::Object); - }); - }); -} - -fn verify_references( - expected: &[(ReferenceTypeId, NodeId, bool)], - references: &[ReferenceDescription], -) { - if expected.len() != references.len() { - debug!("Check expected references to this actual list:"); - expected.iter().for_each(|r| { - let reference_type_id: NodeId = r.0.into(); - let node_id: NodeId = 
r.1.clone(); - let is_forward = r.2; - let found = references.iter().any(|r| { - r.reference_type_id == reference_type_id - && r.node_id.node_id == node_id - && r.is_forward == is_forward - }); - if !found { - debug!( - " Missing expected ({:?}, {:?}, {:?}),", - r.0, node_id, is_forward - ); - } - }); - references.iter().for_each(|r| { - let found = expected.iter().any(|e| { - let reference_type_id: NodeId = e.0.into(); - let node_id: NodeId = e.1.clone(); - let is_forward = e.2; - r.reference_type_id == reference_type_id - && r.node_id.node_id == node_id - && r.is_forward == is_forward - }); - if !found { - debug!( - " Surplus ({:?}, {:?}, {:?}),", - r.reference_type_id, r.node_id.node_id, r.is_forward - ); - } - }); - } - - assert_eq!(expected.len(), references.len()); - expected.into_iter().for_each(|e| { - let reference_type_id: NodeId = e.0.into(); - let node_id: NodeId = e.1.clone(); - let is_forward = e.2; - let reference = references.iter().find(|r| { - r.reference_type_id == reference_type_id - && r.node_id.node_id == node_id - && r.is_forward == is_forward - }); - assert!(reference.is_some()); - }); -} - -#[test] -fn browse_inverse() { - crate::console_logging::init(); - do_view_service_test(|server_state, session, address_space, vs| { - // Ask for Inverse refs only - - let node_id: NodeId = ObjectTypeId::FolderType.into(); - let nodes = vec![node_id.clone()]; - - let request = make_browse_request( - &nodes, - NodeClassMask::empty(), - 1000, - BrowseDirection::Inverse, - NodeId::null(), - ); - - let response = vs.browse(server_state, session, address_space, &request); - let response = supported_message_as!(response, BrowseResponse); - - assert!(response.results.is_some()); - - let results = response.results.unwrap(); - let references = results.get(0).unwrap().references.as_ref().unwrap(); - - // We do NOT expect to find the node in the list of results - assert!(references - .iter() - .find(|r| r.node_id.node_id == node_id) - .is_none()); - - // We expect 
this many results - assert_eq!(references.len(), 21); - - let expected: Vec<(ReferenceTypeId, NodeId, bool)> = vec![ - // (ref_type, node_id, is_forward) - // Inverse refs - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::HistoryServerCapabilitiesType_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ObjectTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::DataTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerType_ServerCapabilities_ModellingRules.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::BaseObjectType.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerCapabilitiesType_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::Server_ServerCapabilities_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::TypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerCapabilitiesType_ModellingRules.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ObjectsFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::VariableTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::RootFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerType_ServerCapabilities_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ViewsFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::EventTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::Server_ServerCapabilities_ModellingRules.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - 
ObjectId::ReferenceTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::HistoricalDataConfigurationType_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::InterfaceTypes.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::AuthorizationServices.into(), - false, - ), - ]; - verify_references(&expected, references); - }); -} - -#[test] -fn browse_both() { - crate::console_logging::init(); - do_view_service_test(|server_state, session, address_space, vs| { - // Ask for both forward and inverse refs - - let node_id: NodeId = ObjectTypeId::FolderType.into(); - let nodes = vec![node_id.clone()]; - - let request = make_browse_request( - &nodes, - NodeClassMask::empty(), - 1000, - BrowseDirection::Both, - NodeId::null(), - ); - - let response = vs.browse(server_state, session, address_space, &request); - let response = supported_message_as!(response, BrowseResponse); - - assert!(response.results.is_some()); - - let results = response.results.unwrap(); - let references = results.get(0).unwrap().references.as_ref().unwrap(); - - // We do NOT expect to find the node in the list of results - assert!(references - .iter() - .find(|r| r.node_id.node_id == node_id) - .is_none()); - - // We expect this many results - assert_eq!(references.len(), 29); - - let expected: Vec<(ReferenceTypeId, NodeId, bool)> = vec![ - // (ref_type, node_id, is_forward) - // Forward refs - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::OperationLimitsType.into(), - true, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::FileDirectoryType.into(), - true, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::CertificateGroupFolderType.into(), - true, - ), - // Inverse refs - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::HistoryServerCapabilitiesType_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ObjectTypesFolder.into(), - false, - ), - ( - 
ReferenceTypeId::HasTypeDefinition, - ObjectId::DataTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerType_ServerCapabilities_ModellingRules.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::BaseObjectType.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerCapabilitiesType_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::Server_ServerCapabilities_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::TypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerCapabilitiesType_ModellingRules.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ObjectsFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::VariableTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::RootFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ServerType_ServerCapabilities_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ViewsFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::EventTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::Server_ServerCapabilities_ModellingRules.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::ReferenceTypesFolder.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::HistoricalDataConfigurationType_AggregateFunctions.into(), - false, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::DictionaryFolderType.into(), - true, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::AlarmGroupType.into(), - true, - ), - ( - ReferenceTypeId::HasSubtype, - 
ObjectTypeId::KeyCredentialConfigurationFolderType.into(), - true, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::SecurityGroupFolderType.into(), - true, - ), - ( - ReferenceTypeId::HasSubtype, - ObjectTypeId::DataSetFolderType.into(), - true, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::InterfaceTypes.into(), - false, - ), - ( - ReferenceTypeId::HasTypeDefinition, - ObjectId::AuthorizationServices.into(), - false, - ), - ]; - verify_references(&expected, references); - }); -} - -#[test] -fn browse_next_no_cp1() { - do_view_service_test(|server_state, session, address_space, vs| { - let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; - let nodes = vec![parent_node_id.clone()]; - // Browse with requested_max_references_per_node = 101, expect 100 results, no continuation point - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 101, - BrowseDirection::Forward, - ); - assert!(response.results.is_some()); - let r1 = &response.results.unwrap()[0]; - let references = r1.references.as_ref().unwrap(); - assert!(r1.continuation_point.is_null()); - verify_references_to_many_vars(references, 100, 0); - }); -} - -#[test] -fn browse_next_no_cp2() { - do_view_service_test(|server_state, session, address_space, vs| { - let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; - let nodes = vec![parent_node_id.clone()]; - // Browse with requested_max_references_per_node = 100, expect 100 results, no continuation point - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 100, - BrowseDirection::Forward, - ); - let r1 = &response.results.unwrap()[0]; - let references = r1.references.as_ref().unwrap(); - assert!(r1.continuation_point.is_null()); - verify_references_to_many_vars(references, 100, 0); - }); -} - -#[test] -fn browse_next_cp() { - // Browse with requested_max_references_per_node = 99 
expect 99 results and a continuation point - // Browse next with continuation point, expect 1 result leaving off from last continuation point - do_view_service_test(|server_state, session, address_space, vs| { - let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; - let nodes = vec![parent_node_id.clone()]; - // Get first 99 - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 99, - BrowseDirection::Forward, - ); - let r1 = &response.results.unwrap()[0]; - let references = r1.references.as_ref().unwrap(); - assert!(!r1.continuation_point.is_null()); - verify_references_to_many_vars(references, 99, 0); - - // Expect continuation point and browse next to return last var and no more continuation point - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r1.continuation_point, - false, - ); - let r2 = &response.results.unwrap()[0]; - assert!(r2.continuation_point.is_null()); - let references = r2.references.as_ref().unwrap(); - verify_references_to_many_vars(references, 1, 99); - - // Browse next again with same continuation point, expect failure - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r1.continuation_point, - false, - ); - let r2 = &response.results.unwrap()[0]; - assert!(r2.continuation_point.is_null()); - assert_eq!(r2.status_code, StatusCode::BadContinuationPointInvalid); - }); -} - -#[test] -fn browse_next_release_cp() { - // Browse and get a continuation point and then release that continuation point, expecting it to be deleted - do_view_service_test(|server_state, session, address_space, vs| { - let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; - let nodes = vec![parent_node_id.clone()]; - // Get first 99 - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 99, - BrowseDirection::Forward, - ); - let r1 = 
&response.results.unwrap()[0]; - let _references = r1.references.as_ref().unwrap(); - assert!(!r1.continuation_point.is_null()); - - // Browse next and release the previous continuation points, expect Null result - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r1.continuation_point, - true, - ); - assert!(response.results.is_none()); - - // Browse next again with same continuation point, expect BadContinuationPointInvalid - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r1.continuation_point, - false, - ); - let r1 = &response.results.unwrap()[0]; - assert_eq!(r1.status_code, StatusCode::BadContinuationPointInvalid); - }); -} - -#[test] -fn browse_next_multiple_cps() { - // Browse multiple times with multiple continuation points - do_view_service_test(|server_state, session, address_space, vs| { - let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; - let nodes = vec![parent_node_id.clone()]; - // Browse with 35 expect continuation point cp1 - // Browse next with cp1 with 35 expect cp2 - // Browse next with cp2 expect 30 results - // Get first 35 - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 35, - BrowseDirection::Forward, - ); - let r1 = &response.results.unwrap()[0]; - let references = r1.references.as_ref().unwrap(); - assert!(!r1.continuation_point.is_null()); - verify_references_to_many_vars(references, 35, 0); - - // Expect continuation point and browse next to return last var and no more continuation point - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r1.continuation_point, - false, - ); - let r2 = &response.results.unwrap()[0]; - assert!(!r2.continuation_point.is_null()); - let references = r2.references.as_ref().unwrap(); - verify_references_to_many_vars(references, 35, 35); - - // Expect continuation point and browse next to return last var and no 
more continuation point - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r2.continuation_point, - false, - ); - let r3 = &response.results.unwrap()[0]; - assert!(r3.continuation_point.is_null()); - let references = r3.references.as_ref().unwrap(); - verify_references_to_many_vars(references, 30, 70); - }); -} - -#[test] -fn browse_next_modify_address_space() { - // Modify the address space after a browse so continuation point becomes invalid - do_view_service_test(|server_state, session, address_space, vs| { - let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; - let nodes = vec![parent_node_id.clone()]; - // Modify address space so existing continuation point is invalid - // Browse next with continuation point, expect BadContinuationPointInvalid - use std::thread; - use std::time::Duration; - - let response = do_browse( - &vs, - server_state, - session.clone(), - address_space.clone(), - &nodes, - 99, - BrowseDirection::Forward, - ); - let r1 = &response.results.unwrap()[0]; - let _references = r1.references.as_ref().unwrap(); - assert!(!r1.continuation_point.is_null()); - - // Sleep a bit, modify the address space so the old continuation point is out of date - thread::sleep(Duration::from_millis(50)); - { - let var_name = "xxxx"; - let mut address_space = trace_write_lock!(address_space); - VariableBuilder::new(&NodeId::new(1, var_name), var_name, var_name) - .data_type(DataTypeId::Int32) - .value(200i32) - .organized_by(&parent_node_id) - .insert(&mut address_space); - } - - // Browsing with the old continuation point should fail - let response = do_browse_next( - &vs, - session.clone(), - address_space.clone(), - &r1.continuation_point, - false, - ); - let r1 = &response.results.unwrap()[0]; - assert_eq!(r1.status_code, StatusCode::BadContinuationPointInvalid); - }); -} - -#[test] -fn translate_browse_paths_to_node_ids() { - do_view_service_test(|server_state, _session, address_space, vs| { - 
// This is a very basic test of this service. It wants to find the relative path from root to the - // Objects folder and ensure that it comes back in the result - - let browse_paths = vec![BrowsePath { - starting_node: ObjectId::RootFolder.into(), - relative_path: RelativePath { - elements: Some(vec![RelativePathElement { - reference_type_id: ReferenceTypeId::Organizes.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, "Objects"), - }]), - }, - }]; - - let request = TranslateBrowsePathsToNodeIdsRequest { - request_header: make_request_header(), - browse_paths: Some(browse_paths), - }; - - let response = vs.translate_browse_paths_to_node_ids(server_state, address_space, &request); - let response: TranslateBrowsePathsToNodeIdsResponse = - supported_message_as!(response, TranslateBrowsePathsToNodeIdsResponse); - - debug!("result = {:#?}", response); - - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - let r1 = &results[0]; - let targets = r1.targets.as_ref().unwrap(); - assert_eq!(targets.len(), 1); - let t1 = &targets[0]; - assert_eq!(&t1.target_id.node_id, &NodeId::objects_folder_id()); - }); -} - -#[test] -fn translate_browse_paths_to_node_ids2() { - do_view_service_test(|server_state, _session, address_space, vs| { - // Inputs and outputs taken from this testcase in Node OPCUA - // - // https://github.com/node-opcua/node-opcua/blob/68b1b57dec23a45148468fbea89ab71a39f9042f/test/end_to_end/u_test_e2e_translateBrowsePath.js - - let starting_node: NodeId = ObjectId::RootFolder.into(); - - let browse_paths = [ - "/Objects/Server", - "/Objects/Server.ServerStatus", - "/Objects/Server.ServerStatus.BuildInfo", - "/Objects/Server.ServerStatus.BuildInfo.ProductName", - "/Objects/Server.ServerStatus.BuildInfo.", - "/Objects.Server", - "/Objects/2:MatrikonOPC Simulation Server (DA)", - ] - .iter() - .map(|path| BrowsePath { - starting_node: starting_node.clone(), - relative_path: RelativePath::from_str( - 
path, - &RelativePathElement::default_node_resolver, - ) - .unwrap(), - }) - .collect::>(); - - let request = TranslateBrowsePathsToNodeIdsRequest { - request_header: make_request_header(), - browse_paths: Some(browse_paths), - }; - - let browse_paths_len = request.browse_paths.as_ref().unwrap().len(); - - let response = vs.translate_browse_paths_to_node_ids(server_state, address_space, &request); - let response: TranslateBrowsePathsToNodeIdsResponse = - supported_message_as!(response, TranslateBrowsePathsToNodeIdsResponse); - - let results = response.results.unwrap(); - assert_eq!(results.len(), browse_paths_len); - - let mut idx = 0; - - // results[0] - { - let r = &results[idx]; - assert!(r.status_code.is_good()); - let targets = r.targets.as_ref().unwrap(); - trace!("targets for {} = {:#?}", idx, targets); - assert_eq!(targets.len(), 1); - assert_eq!(&targets[0].target_id, &ObjectId::Server.into()); - idx += 1; - } - - // results[1] - { - let r = &results[idx]; - assert!(r.status_code.is_good()); - let targets = r.targets.as_ref().unwrap(); - trace!("targets for {} = {:#?}", idx, targets); - assert_eq!(targets.len(), 1); - assert_eq!( - &targets[0].target_id, - &VariableId::Server_ServerStatus.into() - ); - idx += 1; - } - - // results[2] - { - let r = &results[idx]; - assert!(r.status_code.is_good()); - let targets = r.targets.as_ref().unwrap(); - trace!("targets for {} = {:#?}", idx, targets); - assert_eq!(targets.len(), 1); - assert_eq!( - &targets[0].target_id, - &VariableId::Server_ServerStatus_BuildInfo.into() - ); - idx += 1; - } - - // results[3] - { - let r = &results[idx]; - assert!(r.status_code.is_good()); - let targets = r.targets.as_ref().unwrap(); - trace!("targets for {} = {:#?}", idx, targets); - assert_eq!( - &targets[0].target_id, - &VariableId::Server_ServerStatus_BuildInfo_ProductName.into() - ); - idx += 1; - } - - // results[4] - { - let r = &results[idx]; - assert_eq!(r.status_code, StatusCode::BadBrowseNameInvalid); - idx += 1; - } - - 
// results[5] - { - let r = &results[idx]; - assert_eq!(r.status_code, StatusCode::BadNoMatch); - idx += 1; - } - - // results[6] - { - let r = &results[idx]; - assert_eq!(r.status_code, StatusCode::BadNoMatch); - // idx += 1; - } - }); -} - -struct RegisterNodesImpl { - pub session: Weak>, -} - -impl RegisterNodes for RegisterNodesImpl { - fn register_nodes( - &mut self, - session: Arc>, - nodes_to_register: &[NodeId], - ) -> Result, StatusCode> { - let bad_node = ObjectId::ObjectsFolder.into(); - let good_node = NodeId::new(1, 100); - let alias_node = NodeId::new(1, 200); - - if nodes_to_register.contains(&bad_node) { - Err(StatusCode::BadNodeIdInvalid) - } else { - // Simulate holding a weak ref to the session - self.session = Arc::downgrade(&session); - - // The result will be the input except for the good node which will be aliased on its - // way out. - let result = nodes_to_register - .iter() - .map(|n| if *n == good_node { &alias_node } else { n }) - .cloned() - .collect(); - Ok(result) - } - } -} - -struct UnregisterNodesImpl; - -impl UnregisterNodes for UnregisterNodesImpl { - fn unregister_nodes( - &mut self, - _session: Arc>, - _nodes_to_unregister: &[NodeId], - ) -> Result<(), StatusCode> { - Ok(()) - } -} - -#[test] -fn register_nodes_nothing_to_do() { - do_view_service_test(|server_state, session, _address_space, vs| { - // Empty request - let response = vs.register_nodes( - server_state, - session, - &RegisterNodesRequest { - request_header: make_request_header(), - nodes_to_register: None, - }, - ); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - }); -} - -#[test] -fn register_nodes_no_handler() { - do_view_service_test(|server_state, session, _address_space, vs| { - // Invalid request because impl has no registered handler - let response = vs.register_nodes( - server_state, - session, - &RegisterNodesRequest { - 
request_header: make_request_header(), - nodes_to_register: Some(vec![ObjectId::ObjectsFolder.into()]), - }, - ); - let response: RegisterNodesResponse = - supported_message_as!(response, RegisterNodesResponse); - let registered_node_ids = response.registered_node_ids.unwrap(); - // The middle node should be aliased - assert_eq!(registered_node_ids[0], ObjectId::ObjectsFolder.into()); - }); -} - -#[test] -fn register_nodes() { - do_view_service_test(|server_state, session, _address_space, vs| { - // Register the callbacks - { - let mut server_state = trace_write_lock!(server_state); - server_state.set_register_nodes_callbacks( - Box::new(RegisterNodesImpl { - session: Weak::new(), - }), - Box::new(UnregisterNodesImpl {}), - ); - } - - // Make a good call to register - let response = vs.register_nodes( - server_state, - session, - &RegisterNodesRequest { - request_header: make_request_header(), - nodes_to_register: Some(vec![ - NodeId::new(1, 99), - NodeId::new(1, 100), - NodeId::new(1, 101), - ]), - }, - ); - let response: RegisterNodesResponse = - supported_message_as!(response, RegisterNodesResponse); - let registered_node_ids = response.registered_node_ids.unwrap(); - // The middle node should be aliased - assert_eq!(registered_node_ids[0], NodeId::new(1, 99)); - assert_eq!(registered_node_ids[1], NodeId::new(1, 200)); - assert_eq!(registered_node_ids[2], NodeId::new(1, 101)); - }); -} - -#[test] -fn unregister_nodes_nothing_to_do() { - do_view_service_test(|server_state, session, _address_space, vs| { - // Empty request - let response = vs.unregister_nodes( - server_state, - session, - &UnregisterNodesRequest { - request_header: make_request_header(), - nodes_to_unregister: None, - }, - ); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!( - response.response_header.service_result, - StatusCode::BadNothingToDo - ); - }); -} - -#[test] -fn unregister_nodes() { - do_view_service_test(|server_state, session, _address_space, 
vs| { - // Register the callbacks - { - let mut server_state = trace_write_lock!(server_state); - server_state.set_register_nodes_callbacks( - Box::new(RegisterNodesImpl { - session: Weak::new(), - }), - Box::new(UnregisterNodesImpl {}), - ); - } - - // Not much to validate except that the function returns good - let response = vs.unregister_nodes( - server_state, - session, - &UnregisterNodesRequest { - request_header: make_request_header(), - nodes_to_unregister: Some(vec![ - NodeId::new(1, 99), - ObjectId::ObjectsFolder.into(), - NodeId::new(1, 100), - NodeId::new(1, 101), - ]), - }, - ); - let response: UnregisterNodesResponse = - supported_message_as!(response, UnregisterNodesResponse); - assert_eq!(response.response_header.service_result, StatusCode::Good); - }); -} diff --git a/lib/src/server/tests/subscriptions/mod.rs b/lib/src/server/tests/subscriptions/mod.rs deleted file mode 100644 index 18e8c20cc..000000000 --- a/lib/src/server/tests/subscriptions/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod subscription; -mod subscriptions; diff --git a/lib/src/server/tests/subscriptions/subscription.rs b/lib/src/server/tests/subscriptions/subscription.rs deleted file mode 100644 index f3aecd889..000000000 --- a/lib/src/server/tests/subscriptions/subscription.rs +++ /dev/null @@ -1,517 +0,0 @@ -use std::sync::Arc; - -use crate::server::{ - diagnostics::ServerDiagnostics, - subscriptions::subscription::{ - HandledState, Subscription, SubscriptionState, SubscriptionStateParams, TickReason, - UpdateStateAction, - }, -}; -use crate::sync::*; - -const DEFAULT_LIFETIME_COUNT: u32 = 300; -const DEFAULT_KEEPALIVE_COUNT: u32 = 100; - -fn make_subscription(state: SubscriptionState) -> Subscription { - let subscription_interval = 1000f64; - let mut result = Subscription::new( - Arc::new(RwLock::new(ServerDiagnostics::default())), - 0, - true, - subscription_interval, - DEFAULT_LIFETIME_COUNT, - DEFAULT_KEEPALIVE_COUNT, - 0, - ); - result.set_state(state); - result -} - -#[test] -fn 
basic_subscription() { - let s = Subscription::new( - Arc::new(RwLock::new(ServerDiagnostics::default())), - 0, - true, - 1000f64, - DEFAULT_LIFETIME_COUNT, - DEFAULT_KEEPALIVE_COUNT, - 0, - ); - assert_eq!(s.state(), SubscriptionState::Creating); -} - -// The update_state_ tests below test with a set of inputs and expect a set of outputs that -// indicate the subscription has moved from one state to another. - -#[test] -fn update_state_3() { - let mut s = make_subscription(SubscriptionState::Creating); - - // Test #3 - state changes from Creating -> Normal - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Create3); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::SubscriptionCreated - ); - assert_eq!(s.state(), SubscriptionState::Normal); - assert_eq!(s.message_sent(), false); -} - -#[test] -fn update_state_4() { - // Test #4 - - // Create a subscription in the normal state, and an incoming publish request. 
Tick on a subscription - // with no changes and ensure the request is still queued afterwards - - let mut s = make_subscription(SubscriptionState::Normal); - - // Receive Publish Request - // && - // ( - // PublishingEnabled == FALSE - // || - // (PublishingEnabled == TRUE - // && MoreNotifications == FALSE) - // ) - let tick_reason = TickReason::ReceivePublishRequest; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - s.set_publishing_enabled(false); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Normal4); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::Normal); - - // TODO repeat with publishing enabled true, more notifications false -} - -#[test] -fn update_state_5() { - // Test #5 - // Queue a publish request, publishing on, more notifications. 
- // Ensure return notifications action - - let mut s = make_subscription(SubscriptionState::Normal); - - // TODO publish request should include some acknowledgements - - // queue publish request - // set publish enabled true - // set more notifications true - - let tick_reason = TickReason::ReceivePublishRequest; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: true, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - s.set_publishing_enabled(true); - s.set_current_lifetime_count(10); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Normal5); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnNotifications - ); - assert_eq!(s.state(), SubscriptionState::Normal); - assert_eq!(s.lifetime_counter(), s.max_lifetime_count()); - assert_eq!(s.message_sent(), true); - - // TODO ensure deleted acknowledged notification msgs -} - -#[test] -fn update_state_6() { - // set publishing timer expires - // set publishing requ queued - // set publishing enabled true - // set notifications available true - - let mut s = make_subscription(SubscriptionState::Normal); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: true, - }; - - s.set_publishing_enabled(true); - s.set_current_lifetime_count(3); // Expect this to be reset - - let update_state_result = s.update_state(tick_reason, p); - - // ensure 6 - assert_eq!( - update_state_result.handled_state, - HandledState::IntervalElapsed6 - ); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnNotifications - ); - assert_eq!(s.state(), SubscriptionState::Normal); - assert_eq!(s.lifetime_counter(), 299); - assert_eq!(s.message_sent(), true); -} - -#[test] -fn update_state_7() { - // 
set timer expires - // publishing request queued true - // message sent true - // publishing enabled false - - let mut s = make_subscription(SubscriptionState::Normal); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: true, - }; - - s.set_message_sent(false); - s.set_publishing_enabled(false); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!( - update_state_result.handled_state, - HandledState::IntervalElapsed7 - ); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnKeepAlive - ); - assert_eq!(s.state(), SubscriptionState::Normal); - assert_eq!(s.lifetime_counter(), 299); - assert_eq!(s.message_sent(), true); - - // TODO Repeat with publishing enabled true and notifications available false -} - -#[test] -fn update_state_8() { - // set timer expires - // set publishing request queued false - // set message_sent false - - let mut s = make_subscription(SubscriptionState::Normal); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: false, - publishing_timer_expired: true, - }; - s.set_message_sent(false); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!( - update_state_result.handled_state, - HandledState::IntervalElapsed8 - ); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::Late); - // ensure start publishing timer -} - -#[test] -fn update_state_9() { - // set timer expires - // set publishing request queued false - // set message_sent false - - let mut s = make_subscription(SubscriptionState::Normal); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - 
more_notifications: false, - publishing_req_queued: false, - publishing_timer_expired: true, - }; - - s.set_message_sent(true); - s.set_publishing_enabled(false); - s.set_keep_alive_counter(3); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!( - update_state_result.handled_state, - HandledState::IntervalElapsed9 - ); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::KeepAlive); - assert_eq!(s.keep_alive_counter(), s.max_keep_alive_count()); -} - -#[test] -fn update_state_10() { - let mut s = make_subscription(SubscriptionState::Late); - - let tick_reason = TickReason::ReceivePublishRequest; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - s.set_publishing_enabled(true); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Late10); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnNotifications - ); - assert_eq!(s.state(), SubscriptionState::Normal); - assert_eq!(s.message_sent(), true); -} - -#[test] -fn update_state_11() { - let mut s = make_subscription(SubscriptionState::Late); - - let tick_reason = TickReason::ReceivePublishRequest; - let p = SubscriptionStateParams { - notifications_available: false, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - s.set_publishing_enabled(true); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Late11); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnKeepAlive - ); - assert_eq!(s.state(), SubscriptionState::KeepAlive); - assert_eq!(s.message_sent(), true); -} - -#[test] -fn update_state_12() { - let mut s = 
make_subscription(SubscriptionState::Late); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: false, - more_notifications: false, - publishing_req_queued: false, - publishing_timer_expired: true, - }; - - s.set_publishing_enabled(true); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Late12); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::Late); -} - -#[test] -fn update_state_13() { - let mut s = make_subscription(SubscriptionState::KeepAlive); - - let tick_reason = TickReason::ReceivePublishRequest; - let p = SubscriptionStateParams { - notifications_available: false, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::KeepAlive13); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::KeepAlive); -} - -#[test] -fn update_state_14() { - let mut s = make_subscription(SubscriptionState::KeepAlive); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: true, - }; - - s.set_publishing_enabled(true); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::KeepAlive14); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnNotifications - ); - assert_eq!(s.state(), SubscriptionState::Normal); -} - -#[test] -fn update_state_15() { - let mut s = make_subscription(SubscriptionState::KeepAlive); - - let tick_reason = TickReason::TickTimerFired; - let p = 
SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: true, - }; - - s.set_keep_alive_counter(1); - s.set_publishing_enabled(false); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::KeepAlive15); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::ReturnKeepAlive - ); - assert_eq!(s.state(), SubscriptionState::KeepAlive); - assert_eq!(s.keep_alive_counter(), s.max_keep_alive_count()); -} - -#[test] -fn update_state_16() { - let mut s = make_subscription(SubscriptionState::KeepAlive); - - s.set_keep_alive_counter(5); - s.set_publishing_enabled(false); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: true, - publishing_timer_expired: true, - }; - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::KeepAlive16); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::KeepAlive); - assert_eq!(s.keep_alive_counter(), 4); -} - -#[test] -fn update_state_17() { - let mut s = make_subscription(SubscriptionState::KeepAlive); - - let tick_reason = TickReason::TickTimerFired; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: false, - publishing_req_queued: false, - publishing_timer_expired: true, - }; - - s.set_keep_alive_counter(1); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::KeepAlive17); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::None - ); - assert_eq!(s.state(), SubscriptionState::Late); - assert_eq!(s.keep_alive_counter(), 1); -} - -#[test] -fn 
update_state_27() { - // Test #27 - // Queue a publish request, publishing on, more notifications, lifetime of 1 - - // Ensure subscription is closed, update action to close expired subscription - - let mut s = make_subscription(SubscriptionState::Normal); - - // queue publish request - // set publish enabled true - // set more notifications true - - let tick_reason = TickReason::ReceivePublishRequest; - let p = SubscriptionStateParams { - notifications_available: true, - more_notifications: true, - publishing_req_queued: true, - publishing_timer_expired: false, - }; - - s.set_publishing_enabled(true); - s.set_current_lifetime_count(1); - - let update_state_result = s.update_state(tick_reason, p); - - assert_eq!(update_state_result.handled_state, HandledState::Closed27); - assert_eq!( - update_state_result.update_state_action, - UpdateStateAction::SubscriptionExpired - ); - assert_eq!(s.state(), SubscriptionState::Closed); - assert_eq!(s.lifetime_counter(), 1); - assert_eq!(s.message_sent(), false); -} diff --git a/lib/src/server/tests/subscriptions/subscriptions.rs b/lib/src/server/tests/subscriptions/subscriptions.rs deleted file mode 100644 index 524dce7e6..000000000 --- a/lib/src/server/tests/subscriptions/subscriptions.rs +++ /dev/null @@ -1,2 +0,0 @@ -// Tests related to the Subscriptions struct go here, in particular relating to publish request -// and response handling. 
diff --git a/lib/src/server/transport/mod.rs b/lib/src/server/transport/mod.rs new file mode 100644 index 000000000..fcb722b9a --- /dev/null +++ b/lib/src/server/transport/mod.rs @@ -0,0 +1 @@ +pub mod tcp; diff --git a/lib/src/server/transport/tcp.rs b/lib/src/server/transport/tcp.rs new file mode 100644 index 000000000..6e72abbaa --- /dev/null +++ b/lib/src/server/transport/tcp.rs @@ -0,0 +1,340 @@ +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use crate::{ + core::{ + comms::{ + buffer::SendBuffer, + chunker::Chunker, + message_chunk::{MessageChunk, MessageIsFinalType}, + message_chunk_info::ChunkInfo, + secure_channel::SecureChannel, + tcp_codec::{Message, TcpCodec}, + tcp_types::{AcknowledgeMessage, ErrorMessage, HelloMessage}, + }, + SupportedMessage, + }, + server::info::ServerInfo, + types::{DecodingOptions, StatusCode}, +}; +use futures::StreamExt; +use tokio::{ + io::{ReadHalf, WriteHalf}, + net::TcpStream, +}; +use tokio_util::codec::FramedRead; + +/// Transport implementation for opc.tcp. +pub(crate) struct TcpTransport { + read: FramedRead, TcpCodec>, + write: WriteHalf, + send_buffer: SendBuffer, + state: TransportState, + pending_chunks: Vec, + /// Client protocol version set during HELLO + pub(crate) client_protocol_version: u32, + /// Last decoded sequence number + last_received_sequence_number: u32, + info: Arc, +} + +enum TransportState { + WaitingForHello(Instant), + Running, + Closing, +} + +#[derive(Debug, Clone)] +pub(crate) struct TransportConfig { + pub send_buffer_size: usize, + pub max_message_size: usize, + pub max_chunk_count: usize, + pub hello_timeout: Duration, +} + +#[derive(Debug)] +pub(crate) struct Request { + pub message: SupportedMessage, + pub chunk_info: ChunkInfo, + pub request_id: u32, +} + +#[derive(Debug)] +/// Result of polling a TCP transport. 
+pub(crate) enum TransportPollResult { + OutgoingMessageSent, + IncomingChunk, + IncomingMessage(Request), + IncomingHello, + Error(StatusCode), + Closed, +} + +fn min_zero_infinite(server: u32, client: u32) -> u32 { + if client == 0 { + server + } else if server == 0 { + client + } else { + client.min(server) + } +} + +impl TcpTransport { + pub fn new( + stream: TcpStream, + config: TransportConfig, + decoding_options: DecodingOptions, + info: Arc, + ) -> Self { + let (read, write) = tokio::io::split(stream); + let read = FramedRead::new(read, TcpCodec::new(decoding_options)); + + Self { + read, + write, + send_buffer: SendBuffer::new( + config.send_buffer_size, + config.max_message_size, + config.max_chunk_count, + ), + state: TransportState::WaitingForHello(Instant::now() + config.hello_timeout), + pending_chunks: Vec::new(), + last_received_sequence_number: 0, + client_protocol_version: 0, + info, + } + } + + /// Set the transport state to closing, once the final message is sent + /// the connection will be closed. 
+ pub fn set_closing(&mut self) { + self.state = TransportState::Closing; + } + + pub fn is_closing(&self) -> bool { + matches!(self.state, TransportState::Closing) + } + + pub fn enqueue_error(&mut self, message: ErrorMessage) { + self.send_buffer.write_error(message); + } + + pub fn enqueue_message_for_send( + &mut self, + channel: &mut SecureChannel, + message: SupportedMessage, + request_id: u32, + ) -> Result<(), StatusCode> { + self.send_buffer.write(request_id, message, channel)?; + Ok(()) + } + + fn process_hello( + &mut self, + channel: &mut SecureChannel, + hello: HelloMessage, + ) -> Result<(), StatusCode> { + let endpoints = self.info.endpoints(&hello.endpoint_url, &None); + + if !endpoints.is_some_and(|e| hello.is_endpoint_url_valid(&e)) { + error!("HELLO endpoint url is invalid"); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + if !hello.is_valid_buffer_sizes() { + error!("HELLO buffer sizes are invalid"); + return Err(StatusCode::BadCommunicationError); + } + + let server_protocol_version = 0; + // Validate protocol version + if hello.protocol_version > server_protocol_version { + return Err(StatusCode::BadProtocolVersionUnsupported); + } + + self.client_protocol_version = hello.protocol_version; + + let decoding_options = channel.decoding_options(); + + // Send acknowledge + let acknowledge = AcknowledgeMessage::new( + server_protocol_version, + hello.send_buffer_size, + (self.send_buffer.send_buffer_size as u32).min(hello.receive_buffer_size), + min_zero_infinite( + decoding_options.max_message_size as u32, + hello.max_message_size, + ), + min_zero_infinite( + decoding_options.max_chunk_count as u32, + hello.max_chunk_count, + ), + ); + self.send_buffer.revise( + acknowledge.send_buffer_size as usize, + acknowledge.max_message_size as usize, + acknowledge.max_chunk_count as usize, + ); + self.send_buffer.write_ack(acknowledge); + + self.state = TransportState::Running; + + Ok(()) + } + + pub async fn poll(&mut self, channel: &mut 
SecureChannel) -> TransportPollResult { + // If we're waiting for hello, just do that. We're not sending anything until + // we get it. + if let TransportState::WaitingForHello(deadline) = &self.state { + return tokio::select! { + _ = tokio::time::sleep_until((*deadline).into()) => { + TransportPollResult::Error(StatusCode::BadTimeout) + } + r = self.wait_for_hello() => { + match r { + Ok(h) => { + match self.process_hello(channel, h) { + Ok(()) => TransportPollResult::IncomingHello, + Err(e) => TransportPollResult::Error(e) + } + } + Err(e) => { + TransportPollResult::Error(e) + } + } + } + }; + } + + // Either we've got something in the send buffer, which we can send, + // or we're waiting for more outgoing messages. + // We won't wait for outgoing messages while sending, since that + // could cause the send buffer to fill up. + + // If there's nothing in the send buffer, but there are chunks available, + // write them to the send buffer before proceeding. + if self.send_buffer.should_encode_chunks() { + if let Err(e) = self.send_buffer.encode_next_chunk(channel) { + return TransportPollResult::Error(e); + } + } + + // If there is something in the send buffer, write to the stream. + // If not, wait for outgoing messages. + // Either way, listen to incoming messages while we do this. + if self.send_buffer.can_read() { + tokio::select! 
{ + r = self.send_buffer.read_into_async(&mut self.write) => { + if let Err(e) = r { + error!("write bytes task failed: {}", e); + return TransportPollResult::Closed; + } + TransportPollResult::OutgoingMessageSent + } + incoming = self.read.next() => { + self.handle_incoming_message(incoming, channel) + } + } + } else { + if self.is_closing() { + return TransportPollResult::Closed; + } + let incoming = self.read.next().await; + self.handle_incoming_message(incoming, channel) + } + } + + async fn wait_for_hello(&mut self) -> Result { + match self.read.next().await { + Some(Ok(Message::Hello(hello))) => Ok(hello), + Some(Ok(bad_msg)) => { + log::error!("Expected a hello message, got {:?} instead", bad_msg); + Err(StatusCode::BadCommunicationError) + } + Some(Err(communication_err)) => { + error!( + "Communication error while waiting for Hello message: {}", + communication_err + ); + Err(StatusCode::BadCommunicationError) + } + None => Err(StatusCode::BadConnectionClosed), + } + } + + fn handle_incoming_message( + &mut self, + incoming: Option>, + channel: &mut SecureChannel, + ) -> TransportPollResult { + let Some(incoming) = incoming else { + return TransportPollResult::Closed; + }; + match incoming { + Ok(message) => match self.process_message(message, channel) { + Ok(None) => TransportPollResult::IncomingChunk, + Ok(Some(message)) => { + self.pending_chunks.clear(); + TransportPollResult::IncomingMessage(message) + } + Err(e) => { + self.pending_chunks.clear(); + TransportPollResult::Error(e) + } + }, + Err(err) => { + error!("Error reading from stream {:?}", err); + TransportPollResult::Error(StatusCode::BadConnectionClosed) + } + } + } + + fn process_message( + &mut self, + message: Message, + channel: &mut SecureChannel, + ) -> Result, StatusCode> { + match message { + Message::Chunk(chunk) => { + let header = chunk.message_header(&channel.decoding_options())?; + + if header.is_final == MessageIsFinalType::FinalError { + self.pending_chunks.clear(); + Ok(None) 
+ } else { + let chunk = channel.verify_and_remove_security(&chunk.data)?; + + if self.pending_chunks.len() == self.send_buffer.max_chunk_count { + return Err(StatusCode::BadEncodingLimitsExceeded); + } + self.pending_chunks.push(chunk); + + if header.is_final == MessageIsFinalType::Intermediate { + return Ok(None); + } + + let chunk_info = self.pending_chunks[0].chunk_info(channel)?; + + self.last_received_sequence_number = Chunker::validate_chunks( + self.last_received_sequence_number + 1, + channel, + &self.pending_chunks, + )?; + + let request = Chunker::decode(&self.pending_chunks, channel, None)?; + Ok(Some(Request { + request_id: chunk_info.sequence_header.request_id, + chunk_info, + message: request, + })) + } + } + unexpected => { + error!("Received unexpected message: {:?}", unexpected); + Err(StatusCode::BadUnexpectedError) + } + } + } +} diff --git a/lib/src/server/util/mod.rs b/lib/src/server/util/mod.rs deleted file mode 100644 index 3ca774093..000000000 --- a/lib/src/server/util/mod.rs +++ /dev/null @@ -1,63 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides utility routines for things that might be used in a number of places elsewhere. - -use std::sync::Arc; - -use tokio::time::{interval_at, Duration, Instant}; - -use crate::sync::*; -use crate::types::service_types::ServerState as ServerStateType; - -use crate::server::state::ServerState; - -/// This is a convenience for a polling action. This struct starts a repeating timer that calls -/// an action repeatedly. 
-pub struct PollingAction {} - -impl PollingAction { - pub fn spawn( - server_state: Arc>, - interval_ms: u64, - action: F, - ) -> PollingAction - where - F: 'static + Fn() + Send, - { - tokio::spawn(async move { - let mut timer = interval_at(Instant::now(), Duration::from_millis(interval_ms)); - loop { - { - // trace!("polling action.take_while"); - let server_state = trace_read_lock!(server_state); - // If the server aborts or is in a failed state, this polling timer will stop - let abort = match server_state.state() { - ServerStateType::Failed - | ServerStateType::NoConfiguration - | ServerStateType::Shutdown => true, - _ => server_state.is_abort(), - }; - if abort { - debug!("Polling action is stopping due to server state / abort"); - break; - } - } - - // Timer - timer.tick().await; - - // Polling timer will only call the action if the server is in a running state - let process_action = { - let server_state = trace_read_lock!(server_state); - server_state.is_running() - }; - if process_action { - action(); - } - } - }); - PollingAction {} - } -} diff --git a/lib/src/types/add_node_attributes.rs b/lib/src/types/add_node_attributes.rs new file mode 100644 index 000000000..6d238f697 --- /dev/null +++ b/lib/src/types/add_node_attributes.rs @@ -0,0 +1,110 @@ +use super::{ + encoding::DecodingOptions, + extension_object::ExtensionObject, + node_ids::ObjectId, + service_types::{ + DataTypeAttributes, GenericAttributes, MethodAttributes, ObjectAttributes, + ObjectTypeAttributes, ReferenceTypeAttributes, VariableAttributes, VariableTypeAttributes, + ViewAttributes, + }, + status_code::StatusCode, +}; + +#[derive(Clone, Debug)] +pub enum AddNodeAttributes { + Object(ObjectAttributes), + Variable(VariableAttributes), + Method(MethodAttributes), + ObjectType(ObjectTypeAttributes), + VariableType(VariableTypeAttributes), + ReferenceType(ReferenceTypeAttributes), + DataType(DataTypeAttributes), + View(ViewAttributes), + Generic(GenericAttributes), + None, +} + +impl 
AddNodeAttributes { + pub fn from_extension_object( + obj: ExtensionObject, + options: &DecodingOptions, + ) -> Result { + if obj.is_null() { + return Ok(Self::None); + } + match obj + .object_id() + .map_err(|_| StatusCode::BadNodeAttributesInvalid)? + { + ObjectId::ObjectAttributes_Encoding_DefaultBinary => { + Ok(Self::Object(obj.decode_inner(options)?)) + } + ObjectId::VariableAttributes_Encoding_DefaultBinary => { + Ok(Self::Variable(obj.decode_inner(options)?)) + } + ObjectId::MethodAttributes_Encoding_DefaultBinary => { + Ok(Self::Method(obj.decode_inner(options)?)) + } + ObjectId::ObjectTypeAttributes_Encoding_DefaultBinary => { + Ok(Self::ObjectType(obj.decode_inner(options)?)) + } + ObjectId::VariableTypeAttributes_Encoding_DefaultBinary => { + Ok(Self::VariableType(obj.decode_inner(options)?)) + } + ObjectId::ReferenceTypeAttributes_Encoding_DefaultBinary => { + Ok(Self::ReferenceType(obj.decode_inner(options)?)) + } + ObjectId::DataTypeAttributes_Encoding_DefaultBinary => { + Ok(Self::DataType(obj.decode_inner(options)?)) + } + ObjectId::ViewAttributes_Encoding_DefaultBinary => { + Ok(Self::View(obj.decode_inner(options)?)) + } + ObjectId::GenericAttributes_Encoding_DefaultBinary => { + Ok(Self::Generic(obj.decode_inner(options)?)) + } + _ => Err(StatusCode::BadNodeAttributesInvalid), + } + } + + pub fn as_extension_object(&self) -> ExtensionObject { + match self { + AddNodeAttributes::Object(o) => ExtensionObject::from_encodable( + ObjectId::ObjectAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::Variable(o) => ExtensionObject::from_encodable( + ObjectId::VariableAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::Method(o) => ExtensionObject::from_encodable( + ObjectId::MethodAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::ObjectType(o) => ExtensionObject::from_encodable( + ObjectId::ObjectTypeAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::VariableType(o) => 
ExtensionObject::from_encodable( + ObjectId::VariableTypeAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::ReferenceType(o) => ExtensionObject::from_encodable( + ObjectId::ReferenceTypeAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::DataType(o) => ExtensionObject::from_encodable( + ObjectId::DataTypeAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::View(o) => { + ExtensionObject::from_encodable(ObjectId::ViewAttributes_Encoding_DefaultBinary, o) + } + AddNodeAttributes::Generic(o) => ExtensionObject::from_encodable( + ObjectId::GenericAttributes_Encoding_DefaultBinary, + o, + ), + AddNodeAttributes::None => todo!(), + } + } +} diff --git a/lib/src/types/data_value.rs b/lib/src/types/data_value.rs index e2ae2df61..487864023 100644 --- a/lib/src/types/data_value.rs +++ b/lib/src/types/data_value.rs @@ -400,6 +400,27 @@ impl DataValue { } } + /// Creates a `DataValue` from the supplied value and timestamp. If you are passing a value to the Attribute::Write service + /// on a server from a server, you may consider this from the specification: + /// + /// _If the SourceTimestamp or the ServerTimestamp is specified, the Server shall use these values. + /// The Server returns a Bad_WriteNotSupported error if it does not support writing of timestamps_ + /// + /// In which case, use the `value_only()` constructor, or make explicit which fields you pass. 
+ pub fn new_at(value: V, time: DateTime) -> DataValue + where + V: Into, + { + DataValue { + value: Some(value.into()), + status: Some(StatusCode::Good), + source_timestamp: Some(time), + source_picoseconds: Some(0), + server_timestamp: Some(time), + server_picoseconds: Some(0), + } + } + /// Creates an empty DataValue pub fn null() -> DataValue { DataValue { diff --git a/lib/src/types/date_time.rs b/lib/src/types/date_time.rs index 73319769f..bc03e456a 100644 --- a/lib/src/types/date_time.rs +++ b/lib/src/types/date_time.rs @@ -28,7 +28,7 @@ pub type DateTimeUtc = chrono::DateTime; /// A date/time value. This is a wrapper around the chrono type with extra functionality /// for obtaining ticks in OPC UA measurements, endtimes, epoch etc. -#[derive(PartialEq, Debug, Clone, Copy)] +#[derive(PartialEq, Debug, Clone, Copy, Ord, Eq)] pub struct DateTime { date_time: DateTimeUtc, } diff --git a/lib/src/types/encoding.rs b/lib/src/types/encoding.rs index 347878654..a94726b8c 100644 --- a/lib/src/types/encoding.rs +++ b/lib/src/types/encoding.rs @@ -8,10 +8,7 @@ use std::{ fmt::Debug, io::{Cursor, Read, Result, Write}, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, + sync::atomic::{AtomicU64, Ordering}, }; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; @@ -24,11 +21,11 @@ pub type EncodingResult = std::result::Result; /// Depth lock holds a reference on the depth gauge. The drop ensures impl that the reference is /// decremented even if there is a panic unwind. #[derive(Debug)] -pub struct DepthLock { - depth_gauge: Arc, +pub struct DepthLock<'a> { + depth_gauge: &'a DepthGauge, } -impl Drop for DepthLock { +impl<'a> Drop for DepthLock<'a> { fn drop(&mut self) { // This will overflow back if the gauge is somehow at 0. 
That really should not be possible, if it is only ever // incremented from `obtain` @@ -38,8 +35,8 @@ impl Drop for DepthLock { } } -impl DepthLock { - fn new(depth_gauge: Arc) -> (Self, u64) { +impl<'a> DepthLock<'a> { + fn new(depth_gauge: &'a DepthGauge) -> (Self, u64) { let current = depth_gauge.current_depth.fetch_add(1, Ordering::Acquire); (Self { depth_gauge }, current) @@ -47,7 +44,7 @@ impl DepthLock { /// The depth lock tests if the depth can increment and then obtains a lock on it. /// The lock will decrement the depth when it drops to ensure proper behaviour during unwinding. - pub fn obtain(depth_gauge: Arc) -> core::result::Result { + pub fn obtain(depth_gauge: &'a DepthGauge) -> core::result::Result, StatusCode> { let max_depth = depth_gauge.max_depth; let (gauge, val) = Self::new(depth_gauge); @@ -70,6 +67,17 @@ pub struct DepthGauge { pub(self) current_depth: AtomicU64, } +// TODO: In general keeping DepthGauge as part of DecodingOptions is suboptimal, +// since this pattern is unintuitive. It should be separated out. +impl Clone for DepthGauge { + fn clone(&self) -> Self { + Self { + max_depth: self.max_depth.clone(), + current_depth: AtomicU64::new(0), + } + } +} + impl Default for DepthGauge { fn default() -> Self { Self::new(constants::MAX_DECODING_DEPTH) @@ -111,7 +119,7 @@ pub struct DecodingOptions { /// Maximum number of array elements. 0 actually means 0, i.e. 
no array permitted pub max_array_length: usize, /// Decoding depth gauge is used to check for recursion - pub decoding_depth_gauge: Arc, + pub decoding_depth_gauge: DepthGauge, } impl Default for DecodingOptions { @@ -123,7 +131,7 @@ impl Default for DecodingOptions { max_string_length: constants::MAX_STRING_LENGTH, max_byte_string_length: constants::MAX_BYTE_STRING_LENGTH, max_array_length: constants::MAX_ARRAY_LENGTH, - decoding_depth_gauge: Arc::new(DepthGauge::default()), + decoding_depth_gauge: DepthGauge::default(), } } } @@ -136,7 +144,7 @@ impl DecodingOptions { max_string_length: 8192, max_byte_string_length: 8192, max_array_length: 8192, - decoding_depth_gauge: Arc::new(DepthGauge::minimal()), + decoding_depth_gauge: DepthGauge::minimal(), ..Default::default() } } @@ -147,8 +155,8 @@ impl DecodingOptions { Self::default() } - pub fn depth_lock(&self) -> core::result::Result { - DepthLock::obtain(self.decoding_depth_gauge.clone()) + pub fn depth_lock<'a>(&'a self) -> core::result::Result, StatusCode> { + DepthLock::obtain(&self.decoding_depth_gauge) } } @@ -442,7 +450,7 @@ mod tests { { let mut v = Vec::new(); for _ in 0..max_depth { - v.push(DepthLock::obtain(dg.clone()).unwrap()); + v.push(DepthLock::obtain(&dg).unwrap()); } // Depth should now be MAX_DECODING_DEPTH @@ -455,7 +463,7 @@ mod tests { // Next obtain should fail assert_eq!( - DepthLock::obtain(dg.clone()).unwrap_err(), + DepthLock::obtain(&dg).unwrap_err(), StatusCode::BadDecodingError ); diff --git a/lib/src/types/expanded_node_id.rs b/lib/src/types/expanded_node_id.rs index 1bdbd2ded..a4cb7f283 100644 --- a/lib/src/types/expanded_node_id.rs +++ b/lib/src/types/expanded_node_id.rs @@ -23,7 +23,7 @@ use crate::types::{ }; /// A NodeId that allows the namespace URI to be specified instead of an index. 
-#[derive(PartialEq, Debug, Clone)] +#[derive(PartialEq, Debug, Clone, Eq, Hash)] pub struct ExpandedNodeId { pub node_id: NodeId, pub namespace_uri: UAString, diff --git a/lib/src/types/localized_text.rs b/lib/src/types/localized_text.rs index c51a207e0..0539e305c 100644 --- a/lib/src/types/localized_text.rs +++ b/lib/src/types/localized_text.rs @@ -28,7 +28,7 @@ pub struct LocalizedText { impl<'a> From<&'a str> for LocalizedText { fn from(value: &'a str) -> Self { Self { - locale: UAString::from(""), + locale: UAString::null(), text: UAString::from(value), } } @@ -37,7 +37,7 @@ impl<'a> From<&'a str> for LocalizedText { impl From<&String> for LocalizedText { fn from(value: &String) -> Self { Self { - locale: UAString::from(""), + locale: UAString::null(), text: UAString::from(value), } } @@ -46,7 +46,7 @@ impl From<&String> for LocalizedText { impl From for LocalizedText { fn from(value: String) -> Self { Self { - locale: UAString::from(""), + locale: UAString::null(), text: UAString::from(value), } } diff --git a/lib/src/types/mod.rs b/lib/src/types/mod.rs index fa6917bc4..00c72a0b1 100644 --- a/lib/src/types/mod.rs +++ b/lib/src/types/mod.rs @@ -161,6 +161,7 @@ bitflags! { // Bits that control the reference description coming back from browse() bitflags! 
{ + #[derive(Debug, Copy, Clone)] pub struct BrowseDescriptionResultMask: u32 { const RESULT_MASK_REFERENCE_TYPE = 1; const RESULT_MASK_IS_FORWARD = 1 << 1; @@ -191,6 +192,7 @@ pub mod node_ids; #[rustfmt::skip] pub mod service_types; +mod add_node_attributes; pub mod argument; pub mod array; pub mod attribute; @@ -220,7 +222,7 @@ pub mod variant_json; pub mod variant_type_id; pub use crate::types::{ - argument::*, array::*, attribute::*, basic_types::*, byte_string::*, data_types::*, + add_node_attributes::AddNodeAttributes, array::*, attribute::*, byte_string::*, data_types::*, data_value::*, date_time::*, diagnostic_info::*, encoding::*, expanded_node_id::*, extension_object::*, guid::*, localized_text::*, node_id::*, node_ids::*, numeric_range::*, operand::*, qualified_name::*, request_header::*, response_header::*, service_types::*, diff --git a/lib/src/types/node_id.rs b/lib/src/types/node_id.rs index a4c448ea0..ca50ebc01 100644 --- a/lib/src/types/node_id.rs +++ b/lib/src/types/node_id.rs @@ -25,6 +25,8 @@ use crate::types::{ string::*, }; +use super::node_ids::VariableId; + /// The kind of identifier, numeric, string, guid or byte #[derive(Eq, PartialEq, Clone, Debug, Hash)] pub enum Identifier { @@ -527,6 +529,15 @@ impl NodeId { } } + pub fn as_variable_id(&self) -> std::result::Result { + match self.identifier { + Identifier::Numeric(id) if self.namespace == 0 => { + VariableId::try_from(id).map_err(|_| NodeIdError) + } + _ => Err(NodeIdError), + } + } + pub fn as_reference_type_id(&self) -> std::result::Result { // TODO this function should not exist - filter code should work with non ns 0 reference // types diff --git a/lib/src/types/operand.rs b/lib/src/types/operand.rs index 57ee6580b..2583baba4 100644 --- a/lib/src/types/operand.rs +++ b/lib/src/types/operand.rs @@ -23,6 +23,7 @@ pub enum OperandType { SimpleAttributeOperand, } +#[derive(Debug, Clone)] pub enum Operand { ElementOperand(ElementOperand), LiteralOperand(LiteralOperand), diff --git 
a/lib/src/types/qualified_name.rs b/lib/src/types/qualified_name.rs index d57efe2e5..6a124f9e0 100644 --- a/lib/src/types/qualified_name.rs +++ b/lib/src/types/qualified_name.rs @@ -22,7 +22,7 @@ use crate::types::{encoding::*, string::*}; /// NamespaceUriassociated with the NamespaceIndexportion of the QualifiedNameis encoded as /// JSON string unless the NamespaceIndexis 1 or if NamespaceUriis unknown. In these cases, /// the NamespaceIndexis encoded as a JSON number. -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Serialize, Deserialize, Eq, Hash)] #[serde(rename_all = "PascalCase")] pub struct QualifiedName { /// The namespace index diff --git a/lib/src/types/relative_path.rs b/lib/src/types/relative_path.rs index 6fcd654ba..35871a1fe 100644 --- a/lib/src/types/relative_path.rs +++ b/lib/src/types/relative_path.rs @@ -6,8 +6,6 @@ //! //! Functions are implemented on the `RelativePath` and `RelativePathElement` structs where //! there are most useful. -//! -use std::{error::Error, fmt}; use regex::Regex; @@ -19,17 +17,6 @@ use crate::types::{ string::UAString, }; -#[derive(Debug)] -struct RelativePathError; - -impl fmt::Display for RelativePathError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "RelativePathError") - } -} - -impl Error for RelativePathError {} - impl RelativePath { /// The maximum size in chars of any path element. 
const MAX_TOKEN_LEN: usize = 256; diff --git a/lib/src/types/response_header.rs b/lib/src/types/response_header.rs index c2f754529..170f91f79 100644 --- a/lib/src/types/response_header.rs +++ b/lib/src/types/response_header.rs @@ -66,13 +66,29 @@ impl BinaryEncoder for ResponseHeader { } } +pub trait AsRequestHandle { + fn as_request_handle(&self) -> u32; +} + +impl AsRequestHandle for &RequestHeader { + fn as_request_handle(&self) -> u32 { + self.request_handle + } +} + +impl AsRequestHandle for u32 { + fn as_request_handle(&self) -> u32 { + *self + } +} + impl ResponseHeader { - pub fn new_good(request_header: &RequestHeader) -> ResponseHeader { + pub fn new_good(request_header: impl AsRequestHandle) -> ResponseHeader { ResponseHeader::new_service_result(request_header, StatusCode::Good) } pub fn new_service_result( - request_header: &RequestHeader, + request_header: impl AsRequestHandle, service_result: StatusCode, ) -> ResponseHeader { ResponseHeader::new_timestamped_service_result( @@ -84,12 +100,12 @@ impl ResponseHeader { pub fn new_timestamped_service_result( timestamp: DateTime, - request_header: &RequestHeader, + request_header: impl AsRequestHandle, service_result: StatusCode, ) -> ResponseHeader { ResponseHeader { timestamp, - request_handle: request_header.request_handle, + request_handle: request_header.as_request_handle(), service_result, service_diagnostics: DiagnosticInfo::default(), string_table: None, diff --git a/lib/src/types/service_types/enums.rs b/lib/src/types/service_types/enums.rs index 111259ad8..d849d455c 100644 --- a/lib/src/types/service_types/enums.rs +++ b/lib/src/types/service_types/enums.rs @@ -701,7 +701,7 @@ impl BinaryEncoder for AccessLevelType { bitflags! 
{ #[derive(Debug, Copy, Clone, PartialEq)] - pub struct AccessLevelExType: i32 { + pub struct AccessLevelExType: u32 { const None = 0; const CurrentRead = 1; const CurrentWrite = 2; @@ -723,11 +723,11 @@ impl BinaryEncoder for AccessLevelExType { } fn encode(&self, stream: &mut S) -> EncodingResult { - write_i32(stream, self.bits()) + write_u32(stream, self.bits()) } fn decode(stream: &mut S, decoding_options: &DecodingOptions) -> EncodingResult { - Ok(AccessLevelExType::from_bits_truncate(i32::decode( + Ok(AccessLevelExType::from_bits_truncate(u32::decode( stream, decoding_options, )?)) @@ -772,6 +772,12 @@ bitflags! { } } +impl Default for AccessRestrictionType { + fn default() -> Self { + Self::None + } +} + impl BinaryEncoder for AccessRestrictionType { fn byte_len(&self) -> usize { 2 diff --git a/lib/src/types/service_types/impls.rs b/lib/src/types/service_types/impls.rs index b2c5b7b87..128bf50cf 100644 --- a/lib/src/types/service_types/impls.rs +++ b/lib/src/types/service_types/impls.rs @@ -13,8 +13,7 @@ use crate::types::{ node_ids::{DataTypeId, ObjectId}, profiles, qualified_name::QualifiedName, - request_header::RequestHeader, - response_header::ResponseHeader, + response_header::{AsRequestHandle, ResponseHeader}, service_types::{ enums::DeadbandType, AnonymousIdentityToken, ApplicationDescription, ApplicationType, Argument, CallMethodRequest, DataChangeFilter, DataChangeTrigger, DataSetFieldFlags, @@ -34,7 +33,7 @@ pub trait MessageInfo { } impl ServiceFault { - pub fn new(request_header: &RequestHeader, service_result: StatusCode) -> ServiceFault { + pub fn new(request_header: impl AsRequestHandle, service_result: StatusCode) -> ServiceFault { ServiceFault { response_header: ResponseHeader::new_service_result(request_header, service_result), } diff --git a/lib/src/types/tests/encoding.rs b/lib/src/types/tests/encoding.rs index c35bfbc92..b9e81d909 100644 --- a/lib/src/types/tests/encoding.rs +++ b/lib/src/types/tests/encoding.rs @@ -1,4 +1,3 @@ -use 
std::sync::Arc; use std::{io::Cursor, str::FromStr}; use crate::types::{encoding::DecodingOptions, string::UAString, tests::*}; @@ -534,7 +533,7 @@ fn null_array() -> EncodingResult<()> { #[test] fn deep_encoding() { let decoding_options = DecodingOptions { - decoding_depth_gauge: Arc::new(DepthGauge::new(2)), + decoding_depth_gauge: DepthGauge::new(2), ..Default::default() }; diff --git a/lib/src/types/variant.rs b/lib/src/types/variant.rs index e4e4b237a..99b1adc31 100644 --- a/lib/src/types/variant.rs +++ b/lib/src/types/variant.rs @@ -347,6 +347,18 @@ impl From for Variant { } } +impl From> for Variant +where + T: Into, +{ + fn from(value: Option) -> Self { + match value { + Some(v) => v.into(), + None => Variant::Empty, + } + } +} + macro_rules! cast_to_bool { ($value: expr) => { if $value == 1 { @@ -394,7 +406,8 @@ macro_rules! from_array_to_variant_impl { impl From> for Variant { fn from(v: Vec<$rtype>) -> Self { - Variant::from(v.as_slice()) + let array: Vec = v.into_iter().map(|v| Variant::from(v)).collect(); + Variant::try_from(($encoding_mask, array)).unwrap() } } @@ -419,6 +432,9 @@ from_array_to_variant_impl!(VariantTypeId::Int64, i64); from_array_to_variant_impl!(VariantTypeId::UInt64, u64); from_array_to_variant_impl!(VariantTypeId::Float, f32); from_array_to_variant_impl!(VariantTypeId::Double, f64); +from_array_to_variant_impl!(VariantTypeId::NodeId, NodeId); +from_array_to_variant_impl!(VariantTypeId::LocalizedText, LocalizedText); +from_array_to_variant_impl!(VariantTypeId::ExtensionObject, ExtensionObject); /// This macro tries to return a `Vec` from a `Variant::Array>`, e.g. 
/// If the Variant holds @@ -1529,6 +1545,7 @@ impl Variant { // array let self_data_type = self.array_data_type(); let other_data_type = other.array_data_type(); + println!("{:?}, {:?}", self_data_type, other_data_type); if self_data_type.is_none() || other_data_type.is_none() { false } else { @@ -1539,7 +1556,7 @@ impl Variant { pub fn set_range_of(&mut self, range: NumericRange, other: &Variant) -> Result<(), StatusCode> { // Types need to be the same if !self.eq_array_type(other) { - return Err(StatusCode::BadIndexRangeNoData); + return Err(StatusCode::BadIndexRangeDataMismatch); } let other_array = if let Variant::Array(other) = other { @@ -1594,6 +1611,15 @@ impl Variant { } } + /// This function gets a range of values from the variant if it is an array, + /// or returns the variant itself. + pub fn range_of_owned(self, range: NumericRange) -> Result { + match range { + NumericRange::None => Ok(self), + r => self.range_of(r), + } + } + /// This function gets a range of values from the variant if it is an array, or returns a clone /// of the variant itself. 
pub fn range_of(&self, range: NumericRange) -> Result { @@ -1613,7 +1639,7 @@ impl Variant { Err(StatusCode::BadIndexRangeNoData) } } - _ => Err(StatusCode::BadIndexRangeNoData), + _ => Err(StatusCode::BadIndexRangeDataMismatch), } } NumericRange::Range(min, max) => { @@ -1636,7 +1662,7 @@ impl Variant { Ok(Variant::from((array.value_type, values))) } } - _ => Err(StatusCode::BadIndexRangeNoData), + _ => Err(StatusCode::BadIndexRangeDataMismatch), } } NumericRange::MultipleRanges(_ranges) => { diff --git a/lib/tests/browse.rs b/lib/tests/browse.rs new file mode 100644 index 000000000..258c8adc4 --- /dev/null +++ b/lib/tests/browse.rs @@ -0,0 +1,524 @@ +use opcua::{ + server::address_space::{ObjectBuilder, ReferenceDirection, VariableBuilder}, + types::{ + BrowseDescription, BrowseDirection, BrowsePath, BrowseResultMask, ByteString, DataTypeId, + NodeClass, NodeClassMask, NodeId, ObjectId, ObjectTypeId, ReferenceTypeId, RelativePath, + RelativePathElement, StatusCode, VariableTypeId, + }, +}; +use utils::setup; + +mod utils; + +fn hierarchical_desc(node_id: NodeId) -> BrowseDescription { + BrowseDescription { + node_id, + browse_direction: BrowseDirection::Forward, + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + include_subtypes: true, + node_class_mask: NodeClassMask::all().bits(), + result_mask: BrowseResultMask::All as u32, + } +} + +#[tokio::test] +async fn browse() { + let (tester, _nm, session) = setup().await; + + // Browse the server node and expect a few specific nodes. + let r = session + .browse(&[hierarchical_desc(ObjectId::Server.into())], 1000, None) + .await + .unwrap(); + + assert_eq!(r.len(), 1); + let it = &r[0]; + + assert!(it.continuation_point.is_null()); + let refs = it.references.clone().unwrap_or_default(); + // Exact number may vary with new versions of the standard. This number may need to be changed + // in the future. Keep the test as a sanity check. 
+ assert_eq!(refs.len(), 18); + + let server_cap_node = refs + .iter() + .find(|f| f.node_id.node_id == ObjectId::Server_ServerCapabilities.into()) + .unwrap(); + let type_tree = tester.handle.type_tree().read(); + for rf in &refs { + assert!(rf.is_forward); + assert!(type_tree.is_subtype_of( + &rf.reference_type_id, + &ReferenceTypeId::HierarchicalReferences.into() + )); + } + + assert_eq!(server_cap_node.browse_name, "ServerCapabilities".into()); + assert_eq!(server_cap_node.display_name, "ServerCapabilities".into()); + assert_eq!(server_cap_node.node_class, NodeClass::Object); + assert!(server_cap_node.is_forward); + assert_eq!( + server_cap_node.type_definition.node_id, + ObjectTypeId::ServerCapabilitiesType.into() + ); +} + +#[tokio::test] +async fn browse_filter() { + let (_tester, _nm, session) = setup().await; + + // Browse the server node and expect a few specific nodes. + let mut desc = hierarchical_desc(ObjectId::Server.into()); + desc.node_class_mask = NodeClassMask::OBJECT.bits(); + let r = session.browse(&[desc], 1000, None).await.unwrap(); + assert_eq!(r.len(), 1); + let it = &r[0]; + + assert!(it.continuation_point.is_null()); + let refs = it.references.clone().unwrap_or_default(); + // Exact number may vary with new versions of the standard. This number may need to be changed + // in the future. Keep the test as a sanity check. + assert_eq!(refs.len(), 7); + for rf in &refs { + assert!(rf.is_forward); + assert_eq!(rf.node_class, NodeClass::Object); + } +} + +#[tokio::test] +async fn browse_reverse() { + let (_tester, _nm, session) = setup().await; + + // Browse the server node and expect a few specific nodes. 
+ let mut desc = hierarchical_desc(ObjectId::Server.into()); + desc.browse_direction = BrowseDirection::Inverse; + let r = session.browse(&[desc], 1000, None).await.unwrap(); + assert_eq!(r.len(), 1); + let it = &r[0]; + + assert!(it.continuation_point.is_null()); + let refs = it.references.clone().unwrap_or_default(); + // Exact number may vary with new versions of the standard. This number may need to be changed + // in the future. Keep the test as a sanity check. + assert_eq!(refs.len(), 1); + let rf = &refs[0]; + assert!(!rf.is_forward); + assert_eq!(rf.reference_type_id, ReferenceTypeId::Organizes.into()); + assert_eq!(rf.browse_name, "Objects".into()); + assert_eq!(rf.display_name, "Objects".into()); +} + +#[tokio::test] +async fn browse_multiple() { + let (tester, nm, session) = setup().await; + let id1 = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id1, "TestObj1", "TestObj1") + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + let id2 = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id2, "TestObj2", "TestObj2") + .build() + .into(), + &id1, + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + + let r = session + .browse( + &[ + hierarchical_desc(ObjectId::Server_Namespaces.into()), + hierarchical_desc(ObjectId::ObjectsFolder.into()), + hierarchical_desc(id1.clone()), + hierarchical_desc(id2.clone()), + ], + 1000, + None, + ) + .await + .unwrap(); + + assert_eq!(4, r.len()); + let it = &r[0]; + let refs = it.references.clone().unwrap_or_default(); + // Should be 3 namespaces. 
+ assert_eq!(3, refs.len()); + + let it = &r[1]; + let refs = it.references.clone().unwrap_or_default(); + // The objects folder has two references, our custom node and the server node. + // Note that future versions of the standard has more nodes here. + assert_eq!(2, refs.len()); + let rf = refs.iter().find(|r| r.node_id.node_id == id1).unwrap(); + assert_eq!(rf.display_name, "TestObj1".into()); + + // The first custom object should reference the second + let it = &r[2]; + let refs = it.references.clone().unwrap_or_default(); + // The objects folder has one reference, our custom node. + assert_eq!(1, refs.len()); + assert_eq!(refs[0].display_name, "TestObj2".into()); + + // The second custom object has no hierarchical references. + let it = &r[3]; + let refs = it.references.clone().unwrap_or_default(); + // The objects folder has one reference, our custom node. + assert!(refs.is_empty()); +} + +#[tokio::test] +async fn browse_cross_node_manager() { + let (tester, nm, session) = setup().await; + let id1 = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id1, "TestObj1", "TestObj1") + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + + // Add a non-hierarchical reference pointing into the main node manager. + nm.inner().add_references( + nm.address_space(), + &id1, + vec![( + &ObjectId::Server.into(), + ReferenceTypeId::HasCondition.into(), + ReferenceDirection::Forward, + )], + ); + + // Browse the server in inverse, and our custom node forward. + // This should result in an external reference from our custom node manager, + // which should be resolved by calling into the core node manager. 
+ let mut desc1 = hierarchical_desc(id1.clone()); + desc1.reference_type_id = ReferenceTypeId::NonHierarchicalReferences.into(); + let mut desc2 = hierarchical_desc(ObjectId::Server.into()); + desc2.reference_type_id = ReferenceTypeId::NonHierarchicalReferences.into(); + desc2.browse_direction = BrowseDirection::Inverse; + let r = session.browse(&[desc1, desc2], 1000, None).await.unwrap(); + + assert_eq!(2, r.len()); + let it = &r[0]; + let refs = it.references.clone().unwrap_or_default(); + // Expect two non-hierarchical references here, one for the type definition. + assert_eq!(2, refs.len()); + let type_def_ref = refs + .iter() + .find(|r| r.reference_type_id == ReferenceTypeId::HasTypeDefinition.into()) + .unwrap(); + assert_eq!(type_def_ref.display_name, "FolderType".into()); + let server_ref = refs + .iter() + .find(|r| r.node_id.node_id == ObjectId::Server.into()) + .unwrap(); + assert_eq!(server_ref.display_name, "Server".into()); + assert_eq!(server_ref.type_definition, ObjectTypeId::ServerType.into()); + assert_eq!(server_ref.browse_name, "Server".into()); + assert_eq!( + server_ref.reference_type_id, + ReferenceTypeId::HasCondition.into() + ); + assert!(server_ref.is_forward); + + let it = &r[1]; + let refs = it.references.clone().unwrap_or_default(); + // Should only be one reference for now. 
+ assert_eq!(1, refs.len()); + let rf = &refs[0]; + assert_eq!(rf.display_name, "TestObj1".into()); +} + +#[tokio::test] +async fn browse_continuation_point() { + let (tester, nm, session) = setup().await; + let root_id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&root_id, "TestObj1", "TestObj1") + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + for i in 0..1000 { + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, &format!("Var{i}"), &format!("Var{i}")) + .data_type(DataTypeId::Int32) + .build() + .into(), + &root_id, + &ReferenceTypeId::HasComponent.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + } + + let desc = hierarchical_desc(root_id); + let r = session.browse(&[desc], 100, None).await.unwrap(); + assert_eq!(1, r.len()); + let it = &r[0]; + assert_eq!(StatusCode::Good, it.status_code); + assert!(!it.continuation_point.is_null()); + + let mut results = it.references.clone().unwrap(); + let mut cp = it.continuation_point.clone(); + assert_eq!(100, results.len()); + for i in 0..9 { + let r = session.browse_next(false, &[cp.clone()]).await.unwrap(); + assert_eq!(1, r.len()); + let it = &r[0]; + assert_eq!(StatusCode::Good, it.status_code); + if i == 8 { + assert!(it.continuation_point.is_null()); + } else { + assert!(!it.continuation_point.is_null()); + } + cp = it.continuation_point.clone(); + results.extend(it.references.clone().into_iter().flatten()); + } + + assert_eq!(1000, results.len()); +} + +#[tokio::test] +async fn browse_release_continuation_point() { + let (tester, nm, session) = setup().await; + let root_id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&root_id, 
"TestObj1", "TestObj1") + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + for i in 0..1000 { + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, &format!("Var{i}"), &format!("Var{i}")) + .data_type(DataTypeId::Int32) + .build() + .into(), + &root_id, + &ReferenceTypeId::HasComponent.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + } + + let desc = hierarchical_desc(root_id); + let r = session.browse(&[desc], 100, None).await.unwrap(); + assert_eq!(1, r.len()); + let it = &r[0]; + assert!(!it.continuation_point.is_null()); + + let cp = it.continuation_point.clone(); + let r = session.browse_next(true, &[cp.clone()]).await.unwrap(); + assert_eq!(1, r.len()); + let it = &r[0]; + assert_eq!(StatusCode::Good, it.status_code); + let refs = it.references.clone().unwrap_or_default(); + assert!(refs.is_empty()); +} + +#[tokio::test] +async fn browse_limits() { + let (tester, _nm, session) = setup().await; + + let browse_limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_browse; + + // Browse zero + let r = session.browse(&[], 1000, None).await.unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + // Too many operations + let ops: Vec<_> = (0..(browse_limit + 1)) + .map(|r| hierarchical_desc(NodeId::new(2, r as i32))) + .collect(); + let r = session.browse(&ops, 1000, None).await.unwrap_err(); + assert_eq!(r, StatusCode::BadTooManyOperations); + + // Browse next zero + let r = session.browse_next(false, &[]).await.unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + // Too many operations + let ops: Vec<_> = (0..(browse_limit + 1)) + .map(|_| ByteString::from(vec![1u8])) + .collect(); + let r = session.browse_next(false, &ops).await.unwrap_err(); + assert_eq!(r, 
StatusCode::BadTooManyOperations); +} + +#[tokio::test] +async fn translate_browse_path() { + let (tester, nm, session) = setup().await; + // Make a tree of five nodes under each other Obj0 -> Obj1 -> ... -> Obj4 + let mut id = nm.inner().next_node_id(); + let mut parent: NodeId = ObjectId::ObjectsFolder.into(); + let root_id = id.clone(); + for i in 0..5 { + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id, &format!("Obj{i}"), &format!("Obj{i}")) + .build() + .into(), + &parent, + &ReferenceTypeId::HasComponent.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + parent = id; + id = nm.inner().next_node_id(); + } + + let r = session + .translate_browse_paths_to_node_ids(&[BrowsePath { + starting_node: root_id, + relative_path: RelativePath { + elements: Some( + (1..5) + .map(|i| RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: format!("Obj{i}").into(), + }) + .collect(), + ), + }, + }]) + .await + .unwrap(); + assert_eq!(1, r.len()); + let it = &r[0]; + assert_eq!(StatusCode::Good, it.status_code); + let targets = it.targets.clone().unwrap_or_default(); + assert_eq!(1, targets.len()); + let t = &targets[0]; + assert_eq!(t.remaining_path_index, u32::MAX); + // Parent will be the last node ID. + assert_eq!(t.target_id.node_id, parent); +} + +#[tokio::test] +async fn translate_browse_path_cross_node_manager() { + // Same test as above, but start the translate process from the objects folder, + // so we have to traverse through two node managers. + let (tester, nm, session) = setup().await; + // Make a tree of five nodes under each other Obj0 -> Obj1 -> ... 
-> Obj4 + let mut id = nm.inner().next_node_id(); + let mut parent: NodeId = ObjectId::ObjectsFolder.into(); + for i in 0..5 { + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id, &format!("Obj{i}"), &format!("Obj{i}")) + .build() + .into(), + &parent, + &ReferenceTypeId::HasComponent.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + parent = id; + id = nm.inner().next_node_id(); + } + + let r = session + .translate_browse_paths_to_node_ids(&[BrowsePath { + starting_node: ObjectId::ObjectsFolder.into(), + relative_path: RelativePath { + elements: Some( + (0..5) + .map(|i| RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: format!("Obj{i}").into(), + }) + .collect(), + ), + }, + }]) + .await + .unwrap(); + assert_eq!(1, r.len()); + let it = &r[0]; + assert_eq!(StatusCode::Good, it.status_code); + let targets = it.targets.clone().unwrap_or_default(); + assert_eq!(1, targets.len()); + let t = &targets[0]; + assert_eq!(t.remaining_path_index, u32::MAX); + // Parent will be the last node ID. 
+ assert_eq!(t.target_id.node_id, parent); +} + +#[tokio::test] +async fn translate_browse_paths_limits() { + let (tester, _nm, session) = setup().await; + + let limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_translate_browse_paths_to_node_ids; + + // Translate none + let r = session + .translate_browse_paths_to_node_ids(&[]) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + // Translate too many + let ops: Vec<_> = (0..(limit + 1)) + .map(|r| BrowsePath { + starting_node: NodeId::new(2, r as i32), + relative_path: RelativePath { elements: None }, + }) + .collect(); + let r = session + .translate_browse_paths_to_node_ids(&ops) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadTooManyOperations); +} diff --git a/lib/tests/core_tests.rs b/lib/tests/core_tests.rs new file mode 100644 index 000000000..800287864 --- /dev/null +++ b/lib/tests/core_tests.rs @@ -0,0 +1,354 @@ +use std::{sync::atomic::Ordering, time::Duration}; + +use bytes::BytesMut; +use log::debug; +use opcua::{ + client::IdentityToken, + core::comms::tcp_codec::{Message, TcpCodec}, + crypto::SecurityPolicy, + types::{ + ApplicationType, DecodingOptions, MessageSecurityMode, NodeId, ReadValueId, StatusCode, + TimestampsToReturn, VariableId, Variant, + }, +}; +use tokio::{ + io::AsyncReadExt, + net::{TcpListener, TcpStream}, +}; +use tokio_util::codec::Decoder; +use utils::hostname; + +mod utils; + +use crate::utils::{ + client_user_token, client_x509_token, default_server, Tester, CLIENT_USERPASS_ID, TEST_COUNTER, +}; + +#[tokio::test] +async fn hello_timeout() { + opcua::console_logging::init(); + + let test_id = TEST_COUNTER.fetch_add(1, Ordering::Relaxed); + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let port = listener.local_addr().unwrap().port(); + + let server = default_server() + .discovery_urls(vec![format!("opc.tcp://{}:{}", hostname(), port)]) + .pki_dir(format!("./pki-server/{test_id}")) + 
.hello_timeout(1); + let (server, handle) = server.build().unwrap(); + let addr = listener.local_addr().unwrap(); + + tokio::task::spawn(server.run_with(listener)); + + let _guard = handle.token().clone().drop_guard(); + + let mut stream = TcpStream::connect(addr).await.unwrap(); + debug!("Connected to {addr}"); + + // Wait a bit more than the hello timeout (1 second) + tokio::time::sleep(Duration::from_millis(1200)).await; + + let mut bytes = BytesMut::with_capacity(1024); + let result = stream.read_buf(&mut bytes).await; + // Should first read the error message from the server. + let read = result.unwrap(); + assert!(read > 0); + let mut codec = TcpCodec::new(DecodingOptions::default()); + let msg = codec.decode(&mut bytes).unwrap(); + let Some(Message::Error(msg)) = msg else { + panic!("Expected error got {msg:?}"); + }; + assert_eq!(msg.error, StatusCode::BadTimeout.bits()); + + let result = stream.read_buf(&mut bytes).await; + + match result { + Ok(v) => { + if v > 0 { + panic!( + "Hello timeout exceeded and socket is still open, result = {}", + v + ) + } else { + // From + debug!("Client got a read of 0 bytes on the socket, so treating by terminating with success"); + } + } + Err(err) => { + debug!( + "Client got an error {:?} on the socket terminating successfully", + err + ); + } + } + debug!("Test passed, closing server"); +} + +#[tokio::test] +async fn get_endpoints() { + let tester = Tester::new_default_server(false).await; + let endpoints = tester + .client + .get_server_endpoints_from_url(tester.endpoint()) + .await + .unwrap(); + assert_eq!(endpoints.len(), tester.handle.info().config.endpoints.len()); +} + +async fn conn_test(policy: SecurityPolicy, mode: MessageSecurityMode, token: IdentityToken) { + let mut tester = Tester::new_default_server(false).await; + let (session, handle) = tester.connect(policy, mode, token).await.unwrap(); + let _h = handle.spawn(); + + tokio::time::timeout(Duration::from_secs(2), session.wait_for_connection()) + .await + 
.unwrap(); + + session + .read( + &[ReadValueId::from(<VariableId as Into<NodeId>>::into( + VariableId::Server_ServiceLevel, + ))], + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn connect_none() { + conn_test( + SecurityPolicy::None, + MessageSecurityMode::None, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_basic128rsa15_sign() { + conn_test( + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_basic128rsa15_sign_and_encrypt() { + conn_test( + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_basic256_sign() { + conn_test( + SecurityPolicy::Basic256, + MessageSecurityMode::Sign, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_basic256_sign_and_encrypt() { + conn_test( + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_aes256sha256rsaoaep_sign() { + conn_test( + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::Sign, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_aes256sha256rsaoaep_sign_and_encrypt() { + conn_test( + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_aes256sha256rsapss_sign() { + conn_test( + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::Sign, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_aes256sha256rsapss_sign_and_encrypt() { + conn_test( + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::Anonymous, + ) + .await; +} + +#[tokio::test] +async fn connect_basic128rsa15_with_username_password() { + conn_test( + SecurityPolicy::Basic128Rsa15, + 
MessageSecurityMode::SignAndEncrypt, + client_user_token(), + ) + .await; +} + +#[tokio::test] +async fn connect_basic128rsa15_with_x509_token() { + conn_test( + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + client_x509_token(), + ) + .await; +} + +#[tokio::test] +async fn connect_basic128rsa_15_with_invalid_token() { + let mut tester = Tester::new_default_server(true).await; + let (_, handle) = tester + .connect( + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::UserName(CLIENT_USERPASS_ID.to_owned(), "invalid".to_owned()), + ) + .await + .unwrap(); + let res = handle.spawn().await.unwrap(); + assert_eq!(res, StatusCode::BadUserAccessDenied); +} + +#[tokio::test] +async fn find_servers() { + let tester = Tester::new_default_server(true).await; + let servers = tester.client.find_servers(tester.endpoint()).await.unwrap(); + assert_eq!(servers.len(), 1); + + let s = &servers[0]; + let discovery_urls = s.discovery_urls.as_ref().unwrap(); + assert!(!discovery_urls.is_empty()); + assert_eq!(s.application_type, ApplicationType::Server); + assert_eq!(s.application_name.text.as_ref(), "integration_server"); + assert_eq!(s.application_uri.as_ref(), "urn:integration_server"); + assert_eq!(s.product_uri.as_ref(), "urn:integration_server Testkit"); +} + +#[tokio::test] +async fn discovery_test() { + let tester = Tester::new_default_server(true).await; + // Get all + let endpoints = tester + .client + .get_endpoints(tester.endpoint(), &[], &[]) + .await + .unwrap(); + assert_eq!(endpoints.len(), 11); + + // Get with wrong profile URIs + let endpoints = tester + .client + .get_endpoints(tester.endpoint(), &[], &["wrongwrong"]) + .await + .unwrap(); + assert!(endpoints.is_empty()); + + // Get all binary endpoints (all of them) + let endpoints = tester + .client + .get_endpoints( + tester.endpoint(), + &[], + &["http://opcfoundation.org/UA-Profile/Transport/uatcp-uasc-uabinary"], + ) + .await + .unwrap(); + 
assert_eq!(endpoints.len(), 11); +} + +#[tokio::test] +async fn multi_client_test() { + // Simple multi-client test, checking that we can send and receive requests with multiple clients + // to the same server, and also that the client SDK can handle multiple sessions in the same client. + let mut tester = Tester::new_default_server(true).await; + + let c1 = tester + .connect_and_wait( + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::UserName( + CLIENT_USERPASS_ID.to_owned(), + format!("{CLIENT_USERPASS_ID}_password"), + ), + ) + .await + .unwrap(); + // Same user token, should still be fine + let c2 = tester + .connect_and_wait( + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + IdentityToken::UserName( + CLIENT_USERPASS_ID.to_owned(), + format!("{CLIENT_USERPASS_ID}_password"), + ), + ) + .await + .unwrap(); + + // Different user, anonymous + let c3 = tester + .connect_and_wait( + SecurityPolicy::None, + MessageSecurityMode::None, + IdentityToken::Anonymous, + ) + .await + .unwrap(); + + // Read the service level a few times + let mut val = 100; + for _ in 0..5 { + val += 10; + tester.handle.set_service_level(val); + for session in &[c1.clone(), c2.clone(), c3.clone()] { + let value = session + .read( + &[ReadValueId::from(<VariableId as Into<NodeId>>::into( + VariableId::Server_ServiceLevel, + ))], + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + let Some(Variant::Byte(v)) = value[0].value else { + panic!("Wrong result type"); + }; + assert_eq!(val, v); + } + } +} diff --git a/lib/tests/methods.rs b/lib/tests/methods.rs new file mode 100644 index 000000000..f526f0cd2 --- /dev/null +++ b/lib/tests/methods.rs @@ -0,0 +1,240 @@ +use std::sync::{atomic::AtomicU64, Arc}; + +use opcua::{ + server::address_space::MethodBuilder, + types::{ + AttributeId, CallMethodRequest, DataTypeId, NodeId, ObjectId, StatusCode, Variant, + VariantTypeId, + }, +}; + +use utils::setup; + +mod utils; + +#[tokio::test] +async fn call_trivial() { 
+ let (_tester, nm, session) = setup().await; + let called = Arc::new(AtomicU64::new(0)); + + let id = nm.inner().next_node_id(); + let input_id = nm.inner().next_node_id(); + let output_id = nm.inner().next_node_id(); + { + let mut sp = nm.address_space().write(); + MethodBuilder::new(&id, "TestMethod1", "TestMethod1") + .executable(true) + .user_executable(true) + .component_of(ObjectId::ObjectsFolder) + .input_args(&mut sp, &input_id, &[]) + .output_args(&mut sp, &output_id, &[]) + .insert(&mut sp); + } + + let called_ref = called.clone(); + nm.inner().add_method_cb(id.clone(), move |_| { + called_ref.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + Ok(vec![]) + }); + + let r = session + .call_one(CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: id.clone(), + input_arguments: None, + }) + .await + .unwrap(); + assert_eq!(r.status_code, StatusCode::Good); + assert_eq!(1, called.load(std::sync::atomic::Ordering::Relaxed)); +} + +#[tokio::test] +async fn call_args() { + let (_tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + let input_id = nm.inner().next_node_id(); + let output_id = nm.inner().next_node_id(); + { + let mut sp = nm.address_space().write(); + MethodBuilder::new(&id, "MethodAdd", "MethodAdd") + .executable(true) + .user_executable(true) + .component_of(ObjectId::ObjectsFolder) + .input_args( + &mut sp, + &input_id, + &[ + ("Lhs", DataTypeId::Int64).into(), + ("Rhs", DataTypeId::Int64).into(), + ], + ) + .output_args(&mut sp, &output_id, &[("Result", DataTypeId::Int64).into()]) + .insert(&mut sp); + } + + nm.inner().add_method_cb(id.clone(), |args| { + let Some(Variant::Int64(lhs)) = args.get(0).map(|a| a.cast(VariantTypeId::Int64)) else { + return Err(StatusCode::BadInvalidArgument); + }; + let Some(Variant::Int64(rhs)) = args.get(1).map(|a| a.cast(VariantTypeId::Int64)) else { + return Err(StatusCode::BadInvalidArgument); + }; + + Ok(vec![Variant::Int64(lhs + rhs)]) + }); + + let r = 
session + .call_one(CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: id.clone(), + input_arguments: Some(vec![Variant::Int64(3), Variant::Int64(2)]), + }) + .await + .unwrap(); + assert_eq!(r.status_code, StatusCode::Good); + let outputs = r.output_arguments.unwrap().clone(); + assert_eq!(1, outputs.len()); + let Variant::Int64(v) = outputs[0] else { + panic!("Wrong output type"); + }; + assert_eq!(v, 5); + + // Call with wrong args + let r = session + .call_one(CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: id.clone(), + input_arguments: Some(vec![Variant::String("foo".into()), Variant::Int64(2)]), + }) + .await + .unwrap(); + + assert_eq!(r.status_code, StatusCode::BadInvalidArgument); +} + +#[tokio::test] +async fn call_fail() { + let (_tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + let input_id = nm.inner().next_node_id(); + let output_id = nm.inner().next_node_id(); + { + let mut sp = nm.address_space().write(); + MethodBuilder::new(&id, "MethodAdd", "MethodAdd") + .executable(true) + .user_executable(false) + .component_of(ObjectId::ObjectsFolder) + .input_args( + &mut sp, + &input_id, + &[ + ("Lhs", DataTypeId::Int64).into(), + ("Rhs", DataTypeId::Int64).into(), + ], + ) + .output_args(&mut sp, &output_id, &[("Result", DataTypeId::Int64).into()]) + .insert(&mut sp); + } + + nm.inner().add_method_cb(id.clone(), |args| { + let Some(Variant::Int64(lhs)) = args.get(0).map(|a| a.cast(VariantTypeId::Int64)) else { + return Err(StatusCode::BadInvalidArgument); + }; + let Some(Variant::Int64(rhs)) = args.get(1).map(|a| a.cast(VariantTypeId::Int64)) else { + return Err(StatusCode::BadInvalidArgument); + }; + + Ok(vec![Variant::Int64(lhs + rhs)]) + }); + + // Call method that doesn't exist + let r = session + .call_one(CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: NodeId::new(2, 100), + input_arguments: Some(vec![Variant::Int64(3), 
Variant::Int64(2)]), + }) + .await + .unwrap(); + assert_eq!(r.status_code, StatusCode::BadMethodInvalid); + + // Call on wrong object + let r = session + .call_one(CallMethodRequest { + object_id: ObjectId::Server.into(), + method_id: id.clone(), + input_arguments: Some(vec![Variant::Int64(3), Variant::Int64(2)]), + }) + .await + .unwrap(); + assert_eq!(r.status_code, StatusCode::BadMethodInvalid); + + // Call without permission + let r = session + .call_one(CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: id.clone(), + input_arguments: Some(vec![Variant::Int64(3), Variant::Int64(2)]), + }) + .await + .unwrap(); + assert_eq!(r.status_code, StatusCode::BadUserAccessDenied); + + { + let mut sp = nm.address_space().write(); + sp.find_mut(&id) + .unwrap() + .as_mut_node() + .set_attribute(AttributeId::UserExecutable, Variant::Boolean(true)) + .unwrap(); + } + + // Call with too many arguments + let r = session + .call_one(CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: id.clone(), + input_arguments: Some(vec![ + Variant::Int64(3), + Variant::Int64(2), + Variant::Int64(3), + ]), + }) + .await + .unwrap(); + assert_eq!(r.status_code, StatusCode::BadTooManyArguments); +} + +#[tokio::test] +async fn call_limits() { + let (tester, _nm, session) = setup().await; + + let limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_method_call; + + // Call none + let e = session.call(Vec::new()).await.unwrap_err(); + assert_eq!(e, StatusCode::BadNothingToDo); + + // Call too many + let e = session + .call( + (0..(limit + 1)) + .map(|i| CallMethodRequest { + object_id: ObjectId::ObjectsFolder.into(), + method_id: NodeId::new(2, i as i32), + input_arguments: None, + }) + .collect(), + ) + .await + .unwrap_err(); + assert_eq!(e, StatusCode::BadTooManyOperations); +} diff --git a/lib/tests/node_management.rs b/lib/tests/node_management.rs new file mode 100644 index 000000000..dd1db4f24 --- 
/dev/null +++ b/lib/tests/node_management.rs @@ -0,0 +1,218 @@ +use opcua::{ + server::address_space::{EventNotifier, NodeBase, NodeType, ObjectBuilder}, + types::{ + AddNodeAttributes, AddNodesItem, AddReferencesItem, DeleteNodesItem, DeleteReferencesItem, + ExpandedNodeId, NodeClass, NodeId, ObjectAttributes, ObjectId, ObjectTypeId, + ReferenceTypeId, StatusCode, + }, +}; +use utils::setup; + +mod utils; + +#[tokio::test] +async fn add_delete_node() { + let (_tester, nm, session) = setup().await; + + let r = session + .add_nodes(&[AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::HasComponent.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: "MyNode".into(), + node_class: NodeClass::Object, + node_attributes: AddNodeAttributes::Object(ObjectAttributes { + specified_attributes: (1 << 5) | (1 << 6), + display_name: "DisplayName".into(), + description: "Description".into(), + write_mask: Default::default(), + user_write_mask: Default::default(), + event_notifier: EventNotifier::all().bits(), // Should not be set + }) + .as_extension_object(), + type_definition: ExpandedNodeId::new(ObjectTypeId::FolderType), + }]) + .await + .unwrap(); + + assert_eq!(1, r.len()); + let it = &r[0]; + assert_eq!(it.status_code, StatusCode::Good); + assert!(!it.added_node_id.is_null()); + + let id = it.added_node_id.clone(); + + { + let sp = nm.address_space().read(); + let Some(NodeType::Object(o)) = sp.find(&id) else { + panic!("Missing"); + }; + assert_eq!(o.browse_name(), &"MyNode".into()); + assert_eq!(o.display_name(), &"DisplayName".into()); + assert_eq!(o.description(), Some(&"Description".into())); + assert_eq!(0, o.event_notifier().bits()); + } + + println!("{id}"); + + let r = session + .delete_nodes(&[DeleteNodesItem { + node_id: id.clone(), + delete_target_references: true, + }]) + .await + .unwrap(); + assert_eq!(r.len(), 1); + assert_eq!(r[0], StatusCode::Good); +} + +#[tokio::test] +async fn 
add_delete_reference() { + let (tester, nm, session) = setup().await; + + let id1 = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id1, "TestObj1", "TestObj1") + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + let id2 = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id2, "TestObj2", "TestObj2") + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + + let r = session + .add_references(&[AddReferencesItem { + source_node_id: id1.clone(), + reference_type_id: ReferenceTypeId::HasCondition.into(), + is_forward: true, + target_server_uri: Default::default(), + target_node_id: id2.clone().into(), + target_node_class: NodeClass::Object, + }]) + .await + .unwrap(); + assert_eq!(r.len(), 1); + assert_eq!(r[0], StatusCode::Good); + + { + let sp = nm.address_space().read(); + let type_tree = tester.handle.type_tree().read(); + sp.find_references( + &id1, + None::<(NodeId, bool)>, + &type_tree, + opcua::types::BrowseDirection::Forward, + ) + .find(|r| { + r.target_node == &id2 && r.reference_type == &ReferenceTypeId::HasCondition.into() + }) + .unwrap(); + } + + let r = session + .delete_references(&[DeleteReferencesItem { + source_node_id: id1.clone(), + reference_type_id: ReferenceTypeId::HasCondition.into(), + is_forward: true, + target_node_id: id2.clone().into(), + delete_bidirectional: true, + }]) + .await + .unwrap(); + assert_eq!(r.len(), 1); + assert_eq!(r[0], StatusCode::Good); +} + +#[tokio::test] +async fn add_delete_node_limits() { + let (tester, _nm, session) = setup().await; + let limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_node_management; + + // Add zero + let e 
= session.add_nodes(&[]).await.unwrap_err(); + assert_eq!(e, StatusCode::BadNothingToDo); + + // Add too many + let e = session + .add_nodes( + &(0..(limit + 1)) + .map(|i| { + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::HasComponent.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: format!("MyNode{i}").into(), + node_class: NodeClass::Object, + node_attributes: AddNodeAttributes::Object(ObjectAttributes { + specified_attributes: (1 << 5) | (1 << 6), + display_name: "DisplayName".into(), + description: "Description".into(), + write_mask: Default::default(), + user_write_mask: Default::default(), + event_notifier: EventNotifier::all().bits(), // Should not be set + }) + .as_extension_object(), + type_definition: ExpandedNodeId::new(ObjectTypeId::FolderType), + } + }) + .collect::<Vec<_>>(), + ) + .await + .unwrap_err(); + assert_eq!(e, StatusCode::BadTooManyOperations); +} + +#[tokio::test] +async fn add_delete_reference_limits() { + let (tester, _nm, session) = setup().await; + let limit = tester + .handle + .info() + .config + .limits + .operational + .max_references_per_references_management; + + // Add zero + let e = session.add_references(&[]).await.unwrap_err(); + assert_eq!(e, StatusCode::BadNothingToDo); + + // Add too many + let e = session + .add_references( + &(0..(limit + 1)) + .map(|i| AddReferencesItem { + source_node_id: NodeId::new(2, i as i32), + reference_type_id: ReferenceTypeId::HasCause.into(), + is_forward: true, + target_server_uri: Default::default(), + target_node_id: NodeId::new(2, (i + 1) as i32).into(), + target_node_class: NodeClass::Object, + }) + .collect::<Vec<_>>(), + ) + .await + .unwrap_err(); + assert_eq!(e, StatusCode::BadTooManyOperations); +} diff --git a/lib/tests/read.rs b/lib/tests/read.rs new file mode 100644 index 000000000..0f6602c81 --- /dev/null +++ b/lib/tests/read.rs @@ -0,0 +1,1073 @@ +use chrono::TimeDelta; +use opcua::{ + client::HistoryReadAction, 
+ server::address_space::{ + AccessLevel, DataTypeBuilder, EventNotifier, MethodBuilder, ObjectBuilder, + ObjectTypeBuilder, ReferenceTypeBuilder, UserAccessLevel, VariableBuilder, + VariableTypeBuilder, ViewBuilder, + }, + types::{ + AttributeId, DataTypeId, DataValue, DateTime, HistoryData, HistoryReadValueId, NodeClass, + NodeId, ObjectId, ObjectTypeId, QualifiedName, ReadRawModifiedDetails, ReadValueId, + ReferenceTypeId, StatusCode, TimestampsToReturn, UAString, VariableId, VariableTypeId, + Variant, WriteMask, + }, +}; +use utils::{array_value, read_value_id, read_value_ids, setup}; + +mod utils; + +#[tokio::test] +async fn read() { + let (tester, _nm, session) = setup().await; + + // Read the service level + tester.handle.set_service_level(123); + let r = session + .read( + &[read_value_id( + AttributeId::Value, + VariableId::Server_ServiceLevel, + )], + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + assert_eq!(1, r.len()); + assert_eq!(&Variant::Byte(123), r[0].value.as_ref().unwrap()) +} + +#[tokio::test] +async fn read_variable() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .array_dimensions(&[2]) + .value(vec![1, 2]) + .description("Description") + .value_rank(1) + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Value, + AttributeId::Historizing, + AttributeId::ArrayDimensions, + AttributeId::Description, + AttributeId::ValueRank, + AttributeId::DataType, + AttributeId::AccessLevel, + AttributeId::UserAccessLevel, + AttributeId::DisplayName, + 
AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + array_value(&r[0]), + &vec![Variant::Int32(1), Variant::Int32(2)] + ); + assert_eq!(r[1].value, Some(Variant::Boolean(true))); + assert_eq!(array_value(&r[2]), &vec![Variant::UInt32(2)]); + assert_eq!( + r[3].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!(r[4].value, Some(Variant::Int32(1))); + assert_eq!( + r[5].value, + Some(Variant::NodeId(Box::new(DataTypeId::Int32.into()))) + ); + assert_eq!(r[6].value, Some(Variant::Byte(1))); + assert_eq!(r[7].value, Some(Variant::Byte(1))); + assert_eq!( + r[8].value, + Some(Variant::LocalizedText(Box::new("TestVar1".into()))) + ); + assert_eq!( + r[9].value, + Some(Variant::QualifiedName(Box::new("TestVar1".into()))) + ); + assert_eq!( + r[10].value, + Some(Variant::Int32(NodeClass::Variable as i32)) + ); + assert_eq!(r[11].value, Some(Variant::NodeId(Box::new(id)))); +} + +#[tokio::test] +async fn read_object() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectBuilder::new(&id, "TestObj1", "TestObj1") + .description("Description") + .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::EventNotifier, + AttributeId::WriteMask, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( 
+ r[1].value, + Some(Variant::LocalizedText(Box::new("TestObj1".into()))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new("TestObj1".into()))) + ); + assert_eq!(r[3].value, Some(Variant::Int32(NodeClass::Object as i32))); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::Byte(EventNotifier::SUBSCRIBE_TO_EVENTS.bits())) + ); + assert_eq!( + r[6].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); +} + +#[tokio::test] +async fn read_view() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ViewBuilder::new(&id, "TestView1", "TestView1") + .description("Description") + .event_notifier(EventNotifier::SUBSCRIBE_TO_EVENTS) + .contains_no_loops(true) + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::EventNotifier, + AttributeId::WriteMask, + AttributeId::ContainsNoLoops, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( + r[1].value, + Some(Variant::LocalizedText(Box::new("TestView1".into()))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new("TestView1".into()))) + ); + assert_eq!(r[3].value, Some(Variant::Int32(NodeClass::View as i32))); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::Byte(EventNotifier::SUBSCRIBE_TO_EVENTS.bits())) + ); + assert_eq!( + r[6].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); + 
assert_eq!(r[7].value, Some(Variant::Boolean(true))); +} + +#[tokio::test] +async fn read_method() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + MethodBuilder::new(&id, "TestMethod1", "TestMethod1") + .description("Description") + .executable(true) + .user_executable(false) + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::WriteMask, + AttributeId::Executable, + AttributeId::UserExecutable, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( + r[1].value, + Some(Variant::LocalizedText(Box::new("TestMethod1".into()))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new("TestMethod1".into()))) + ); + assert_eq!(r[3].value, Some(Variant::Int32(NodeClass::Method as i32))); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); + assert_eq!(r[6].value, Some(Variant::Boolean(true))); + assert_eq!(r[7].value, Some(Variant::Boolean(false))); +} + +#[tokio::test] +async fn read_object_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectTypeBuilder::new(&id, "TestObjectType1", "TestObjectType1") + .description("Description") + .is_abstract(true) + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &ObjectTypeId::BaseObjectType.into(), + &ReferenceTypeId::HasSubtype.into(), 
+ None, + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::WriteMask, + AttributeId::IsAbstract, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( + r[1].value, + Some(Variant::LocalizedText(Box::new("TestObjectType1".into()))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new("TestObjectType1".into()))) + ); + assert_eq!( + r[3].value, + Some(Variant::Int32(NodeClass::ObjectType as i32)) + ); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); + assert_eq!(r[6].value, Some(Variant::Boolean(true))); +} + +#[tokio::test] +async fn read_variable_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableTypeBuilder::new(&id, "TestVariableType1", "TestVariableType1") + .description("Description") + .is_abstract(true) + .data_type(DataTypeId::Int32) + .array_dimensions(&[2]) + .value(vec![1, 2]) + .value_rank(1) + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &ObjectTypeId::BaseObjectType.into(), + &ReferenceTypeId::HasSubtype.into(), + None, + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::WriteMask, + AttributeId::IsAbstract, + AttributeId::DataType, + AttributeId::ArrayDimensions, + AttributeId::ValueRank, + AttributeId::Value, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + 
Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( + r[1].value, + Some(Variant::LocalizedText(Box::new("TestVariableType1".into()))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new("TestVariableType1".into()))) + ); + assert_eq!( + r[3].value, + Some(Variant::Int32(NodeClass::VariableType as i32)) + ); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); + assert_eq!(r[6].value, Some(Variant::Boolean(true))); + assert_eq!( + r[7].value, + Some(Variant::NodeId(Box::new(DataTypeId::Int32.into()))) + ); + assert_eq!(array_value(&r[8]), &vec![Variant::UInt32(2)]); + assert_eq!(r[9].value, Some(Variant::Int32(1))); + assert_eq!( + array_value(&r[10]), + &vec![Variant::Int32(1), Variant::Int32(2)] + ); +} + +#[tokio::test] +async fn read_data_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + DataTypeBuilder::new(&id, "TestDataType1", "TestDataType1") + .description("Description") + .is_abstract(true) + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &DataTypeId::BaseDataType.into(), + &ReferenceTypeId::HasSubtype.into(), + None, + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::WriteMask, + AttributeId::IsAbstract, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( + r[1].value, + Some(Variant::LocalizedText(Box::new("TestDataType1".into()))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new("TestDataType1".into()))) + ); + assert_eq!(r[3].value, 
Some(Variant::Int32(NodeClass::DataType as i32))); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); + assert_eq!(r[6].value, Some(Variant::Boolean(true))); +} + +#[tokio::test] +async fn read_reference_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ReferenceTypeBuilder::new(&id, "TestReferenceType1", "TestReferenceType1") + .description("Description") + .is_abstract(true) + .symmetric(true) + .inverse_name("Inverse") + .write_mask(WriteMask::DISPLAY_NAME) + .build() + .into(), + &ReferenceTypeId::References.into(), + &ReferenceTypeId::HasSubtype.into(), + None, + Vec::new(), + ); + + let r = session + .read( + &read_value_ids( + &[ + AttributeId::Description, + AttributeId::DisplayName, + AttributeId::BrowseName, + AttributeId::NodeClass, + AttributeId::NodeId, + AttributeId::WriteMask, + AttributeId::IsAbstract, + AttributeId::Symmetric, + AttributeId::InverseName, + ], + &id, + ), + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("Description".into()))) + ); + assert_eq!( + r[1].value, + Some(Variant::LocalizedText(Box::new( + "TestReferenceType1".into() + ))) + ); + assert_eq!( + r[2].value, + Some(Variant::QualifiedName(Box::new( + "TestReferenceType1".into() + ))) + ); + assert_eq!( + r[3].value, + Some(Variant::Int32(NodeClass::ReferenceType as i32)) + ); + assert_eq!(r[4].value, Some(Variant::NodeId(Box::new(id)))); + assert_eq!( + r[5].value, + Some(Variant::UInt32(WriteMask::DISPLAY_NAME.bits())) + ); + assert_eq!(r[6].value, Some(Variant::Boolean(true))); + assert_eq!(r[7].value, Some(Variant::Boolean(true))); + assert_eq!( + r[8].value, + Some(Variant::LocalizedText(Box::new("Inverse".into()))) + ); +} + +#[tokio::test] +async fn read_mixed() { + let 
(tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .value("value") + .description("Description") + .value_rank(1) + .data_type(DataTypeId::String) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + // Read from various nodes in different node managers + tester.handle.set_service_level(200); + let r = session + .read( + &[ + read_value_id(AttributeId::DisplayName, &id), + read_value_id(AttributeId::Value, &id), + read_value_id(AttributeId::Value, VariableId::Server_ServiceLevel), + read_value_id(AttributeId::DisplayName, ObjectId::Server), + // Wrong attribute + read_value_id(AttributeId::Value, ObjectId::Server), + // Invalid node, valid namespace + read_value_id(AttributeId::Value, nm.inner().next_node_id()), + // Invalid namespace + read_value_id(AttributeId::Value, NodeId::new(100, 1)), + // Index range on non-value + ReadValueId { + node_id: VariableId::Server_ServiceLevel.into(), + attribute_id: AttributeId::Value as u32, + index_range: UAString::from("1"), + ..Default::default() + }, + // Invalid encoding + ReadValueId { + node_id: VariableId::Server_ServiceLevel.into(), + attribute_id: AttributeId::Value as u32, + data_encoding: QualifiedName::from("foo"), + ..Default::default() + }, + ], + TimestampsToReturn::Both, + 0.0, + ) + .await + .unwrap(); + + assert_eq!( + r[0].value, + Some(Variant::LocalizedText(Box::new("TestVar1".into()))) + ); + assert_eq!(r[1].value, Some(Variant::String("value".into()))); + assert_eq!(r[2].value, Some(Variant::Byte(200))); + assert_eq!( + r[3].value, + Some(Variant::LocalizedText(Box::new("Server".into()))) + ); + assert_eq!(r[4].status, 
Some(StatusCode::BadAttributeIdInvalid)); + assert_eq!(r[4].value, None); + assert_eq!(r[5].status, Some(StatusCode::BadNodeIdUnknown)); + assert_eq!(r[5].value, None); + assert_eq!(r[6].status, Some(StatusCode::BadNodeIdUnknown)); + assert_eq!(r[6].value, None); + assert_eq!(r[7].status, Some(StatusCode::BadIndexRangeDataMismatch)); + assert_eq!(r[7].value, None); + assert_eq!(r[8].status, Some(StatusCode::BadDataEncodingInvalid)); + assert_eq!(r[8].value, None); +} + +#[tokio::test] +async fn read_limits() { + let (tester, _nm, session) = setup().await; + + let read_limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_read; + + // Read zero + let r = session + .read(&[], TimestampsToReturn::Both, 0.0) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + // Invalid max age + let r = session + .read( + &[read_value_id(AttributeId::DisplayName, ObjectId::Server)], + TimestampsToReturn::Both, + -15.0, + ) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadMaxAgeInvalid); + + // Invalid timestamps to return + let r = session + .read( + &[read_value_id(AttributeId::DisplayName, ObjectId::Server)], + TimestampsToReturn::Invalid, + 0.0, + ) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadTimestampsToReturnInvalid); + + // Too many operations + let ops: Vec<_> = (0..(read_limit + 1)) + .map(|r| read_value_id(AttributeId::Value, NodeId::new(2, r as i32))) + .collect(); + let r = session + .read(&ops, TimestampsToReturn::Both, 0.0) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadTooManyOperations); + + // Exact number of operations, should not fail, though the reads will probably fail, mostly. 
+ let ops: Vec<_> = (0..read_limit) + .map(|r| read_value_id(AttributeId::Value, NodeId::new(2, r as i32))) + .collect(); + session + .read(&ops, TimestampsToReturn::Both, 0.0) + .await + .unwrap(); +} + +#[tokio::test] +async fn history_read_raw() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .value(0) + .description("Description") + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ | AccessLevel::HISTORY_READ) + .user_access_level(UserAccessLevel::CURRENT_READ | UserAccessLevel::HISTORY_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let start = DateTime::now() - TimeDelta::try_seconds(1000).unwrap(); + + nm.inner().add_history( + &id, + (0..1000).map(|v| DataValue { + value: Some((v as i32).into()), + status: Some(StatusCode::Good), + source_timestamp: Some(start + TimeDelta::try_seconds(v).unwrap()), + server_timestamp: Some(start + TimeDelta::try_seconds(v).unwrap()), + ..Default::default() + }), + ); + + let action = HistoryReadAction::ReadRawModifiedDetails(ReadRawModifiedDetails { + is_read_modified: false, + start_time: start, + end_time: start + TimeDelta::try_seconds(2000).unwrap(), + num_values_per_node: 100, + return_bounds: false, + }); + + // Read up to 100, should get the 100 first. 
+ let r = session + .history_read( + &action, + TimestampsToReturn::Both, + false, + &[HistoryReadValueId { + node_id: id.clone(), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: Default::default(), + }], + ) + .await + .unwrap(); + + assert_eq!(r.len(), 1); + let v = &r[0]; + assert!(!v.continuation_point.is_null()); + assert_eq!(v.status_code, StatusCode::Good); + let mut data = v + .history_data + .decode_inner::(session.decoding_options()) + .unwrap() + .data_values + .unwrap(); + + assert_eq!(data.len(), 100); + + let mut cp = v.continuation_point.clone(); + + // Read the 100 next in a loop until we reach the end. + for i in 0..9 { + let r = session + .history_read( + &action, + TimestampsToReturn::Both, + false, + &[HistoryReadValueId { + node_id: id.clone(), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: cp, + }], + ) + .await + .unwrap(); + + assert_eq!(r.len(), 1); + let v = &r[0]; + if i == 8 { + assert!(v.continuation_point.is_null()); + } else { + assert!(!v.continuation_point.is_null(), "Expected cp for i = {}", i); + } + assert_eq!(v.status_code, StatusCode::Good); + let next_data = v + .history_data + .decode_inner::(session.decoding_options()) + .unwrap() + .data_values + .unwrap(); + + assert_eq!(next_data.len(), 100); + data.extend(next_data); + + cp = v.continuation_point.clone(); + } + + // Data should be from 0 to 999, with the correct timestamps + // This part is more a test of the test node manager, + // but it's good to verify that continuation points work as expected. 
+ assert_eq!(1000, data.len()); + for (idx, it) in data.into_iter().enumerate() { + let v = match it.value.as_ref().unwrap() { + Variant::Int32(v) => *v, + _ => panic!("Wrong value type: {:?}", it.value), + }; + assert_eq!(idx as i32, v); + assert_eq!( + it.source_timestamp, + Some(start + TimeDelta::try_seconds(idx as i64).unwrap()) + ); + } +} + +#[tokio::test] +async fn history_read_release_continuation_points() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .value(0) + .description("Description") + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ | AccessLevel::HISTORY_READ) + .user_access_level(UserAccessLevel::CURRENT_READ | UserAccessLevel::HISTORY_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let start = DateTime::now() - TimeDelta::try_seconds(1000).unwrap(); + + nm.inner().add_history( + &id, + (0..1000).map(|v| DataValue { + value: Some((v as i32).into()), + status: Some(StatusCode::Good), + source_timestamp: Some(start + TimeDelta::try_seconds(v).unwrap()), + server_timestamp: Some(start + TimeDelta::try_seconds(v).unwrap()), + ..Default::default() + }), + ); + + let action = HistoryReadAction::ReadRawModifiedDetails(ReadRawModifiedDetails { + is_read_modified: false, + start_time: start, + end_time: start + TimeDelta::try_seconds(2000).unwrap(), + num_values_per_node: 100, + return_bounds: false, + }); + + let r = session + .history_read( + &action, + TimestampsToReturn::Both, + false, + &[HistoryReadValueId { + node_id: id.clone(), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: Default::default(), + }], + ) + .await + .unwrap(); + + assert_eq!(r.len(), 1); + let v = 
&r[0]; + assert!(!v.continuation_point.is_null()); + assert_eq!(v.status_code, StatusCode::Good); + let data = v + .history_data + .decode_inner::(session.decoding_options()) + .unwrap() + .data_values + .unwrap(); + + assert_eq!(data.len(), 100); + + let cp = v.continuation_point.clone(); + + let r = session + .history_read( + &action, + TimestampsToReturn::Both, + true, + &[HistoryReadValueId { + node_id: id.clone(), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: cp, + }], + ) + .await + .unwrap(); + + assert_eq!(r.len(), 1); + let v = &r[0]; + assert!(v.continuation_point.is_null()); + assert_eq!(v.status_code, StatusCode::Good); + assert!(v.history_data.is_null()); +} + +#[tokio::test] +async fn history_read_fail() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .value(0) + .description("Description") + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let start = DateTime::now() - TimeDelta::try_seconds(1000).unwrap(); + + let action = HistoryReadAction::ReadRawModifiedDetails(ReadRawModifiedDetails { + is_read_modified: false, + start_time: start, + end_time: start + TimeDelta::try_seconds(2000).unwrap(), + num_values_per_node: 100, + return_bounds: false, + }); + + // Read nothing + let r = session + .history_read(&action, TimestampsToReturn::Both, false, &[]) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + let history_read_limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_history_read_data; + + // Read too many + let r = 
session + .history_read( + &action, + TimestampsToReturn::Both, + false, + &(0..(history_read_limit + 1)) + .map(|i| HistoryReadValueId { + node_id: NodeId::new(2, i as i32), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: Default::default(), + }) + .collect::>(), + ) + .await + .unwrap_err(); + assert_eq!(r, StatusCode::BadTooManyOperations); + + // Read without access + let r = session + .history_read( + &action, + TimestampsToReturn::Both, + false, + &[HistoryReadValueId { + node_id: id.clone(), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: Default::default(), + }], + ) + .await + .unwrap(); + + assert_eq!(r[0].status_code, StatusCode::BadUserAccessDenied); + + // Read node that doesn't exist + let r = session + .history_read( + &action, + TimestampsToReturn::Both, + false, + &[HistoryReadValueId { + node_id: NodeId::new(2, 100), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: Default::default(), + }], + ) + .await + .unwrap(); + + assert_eq!(r[0].status_code, StatusCode::BadNodeIdUnknown); +} diff --git a/lib/tests/subscriptions.rs b/lib/tests/subscriptions.rs new file mode 100644 index 000000000..0b4f3a109 --- /dev/null +++ b/lib/tests/subscriptions.rs @@ -0,0 +1,488 @@ +use std::time::Duration; + +use opcua::{ + client::OnSubscriptionNotification, + server::address_space::{AccessLevel, UserAccessLevel, VariableBuilder}, + types::{ + AttributeId, DataTypeId, DataValue, DateTime, MonitoredItemCreateRequest, + MonitoredItemModifyRequest, MonitoringMode, MonitoringParameters, NodeId, ObjectId, + ReadValueId, ReferenceTypeId, StatusCode, TimestampsToReturn, VariableTypeId, Variant, + }, +}; +use tokio::{sync::mpsc::UnboundedReceiver, time::timeout}; +use utils::setup; + +mod utils; + +#[derive(Clone)] +struct ChannelNotifications { + data_values: tokio::sync::mpsc::UnboundedSender<(ReadValueId, DataValue)>, + events: 
tokio::sync::mpsc::UnboundedSender<(ReadValueId, Option>)>, +} + +impl ChannelNotifications { + pub fn new() -> ( + Self, + UnboundedReceiver<(ReadValueId, DataValue)>, + UnboundedReceiver<(ReadValueId, Option>)>, + ) { + let (data_values, data_recv) = tokio::sync::mpsc::unbounded_channel(); + let (events, events_recv) = tokio::sync::mpsc::unbounded_channel(); + ( + Self { + data_values, + events, + }, + data_recv, + events_recv, + ) + } +} + +impl OnSubscriptionNotification for ChannelNotifications { + fn on_data_value(&mut self, notification: DataValue, item: &opcua::client::MonitoredItem) { + let _ = self + .data_values + .send((item.item_to_monitor().clone(), notification)); + } + + fn on_event( + &mut self, + event_fields: Option>, + item: &opcua::client::MonitoredItem, + ) { + let _ = self + .events + .send((item.item_to_monitor().clone(), event_fields)); + } +} + +#[tokio::test] +async fn simple_subscriptions() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .value(-1) + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let (notifs, mut data, _) = ChannelNotifications::new(); + + // Create a subscription + let sub_id = session + .create_subscription(Duration::from_millis(100), 100, 20, 1000, 0, true, notifs) + .await + .unwrap(); + + // Create a monitored item on that subscription + let res = session + .create_monitored_items( + sub_id, + TimestampsToReturn::Both, + vec![MonitoredItemCreateRequest { + item_to_monitor: ReadValueId { + node_id: id.clone(), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + monitoring_mode: 
opcua::types::MonitoringMode::Reporting, + requested_parameters: MonitoringParameters { + sampling_interval: 0.0, + queue_size: 10, + discard_oldest: true, + ..Default::default() + }, + }], + ) + .await + .unwrap(); + assert_eq!(res.len(), 1); + let it = &res[0]; + assert_eq!(it.status_code, StatusCode::Good); + + // We should quickly get a data value, this is due to the initial queued publish request. + let (r, v) = timeout(Duration::from_millis(500), data.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(r.node_id, id); + let val = match v.value { + Some(Variant::Int32(v)) => v, + _ => panic!("Expected integer value"), + }; + assert_eq!(-1, val); + + // Update the value + nm.set_value( + tester.handle.subscriptions(), + &id, + None, + DataValue { + value: Some(1.into()), + status: Some(StatusCode::Good), + source_timestamp: Some(DateTime::now()), + ..Default::default() + }, + ) + .unwrap(); + // Now we should get a value once we've sent another publish. + let (r, v) = timeout(Duration::from_millis(500), data.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(r.node_id, id); + let val = match v.value { + Some(Variant::Int32(v)) => v, + _ => panic!("Expected integer value"), + }; + assert_eq!(1, val); + + // Finally, delete the subscription + session.delete_subscription(sub_id).await.unwrap(); +} + +async fn recv_n(recv: &mut UnboundedReceiver, n: usize) -> Vec { + let mut res = Vec::with_capacity(n); + for _ in 0..n { + res.push(recv.recv().await.unwrap()); + } + res +} + +#[tokio::test] +async fn many_subscriptions() { + let (tester, nm, session) = setup().await; + + let mut ids = Vec::new(); + for i in 0..1000 { + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, &format!("Var{i}"), &format!("Var{i}")) + .data_type(DataTypeId::Int32) + .value(-1) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + 
&ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::HasComponent.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + ids.push(id); + } + + let (notifs, mut data, _) = ChannelNotifications::new(); + + // Create a subscription + let sub_id = session + .create_subscription(Duration::from_millis(100), 100, 20, 100, 0, true, notifs) + .await + .unwrap(); + + let res = session + .create_monitored_items( + sub_id, + TimestampsToReturn::Both, + ids.into_iter() + .map(|id| MonitoredItemCreateRequest { + item_to_monitor: ReadValueId { + node_id: id, + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + monitoring_mode: opcua::types::MonitoringMode::Reporting, + requested_parameters: MonitoringParameters { + sampling_interval: 0.0, + queue_size: 10, + discard_oldest: true, + ..Default::default() + }, + }) + .collect(), + ) + .await + .unwrap(); + + for r in res { + assert_eq!(r.status_code, StatusCode::Good); + } + + // Should get 1000 notifications, note that since the max notifications per publish is 100, + // this should require 10 publish requests. No current way to measure that, unfortunately. + // TODO: Once we have proper server metrics, check those here. 
+ let its = tokio::time::timeout(Duration::from_millis(800), recv_n(&mut data, 1000)) + .await + .unwrap(); + assert_eq!(1000, its.len()); + for (_id, v) in its { + let val = match v.value { + Some(Variant::Int32(v)) => v, + _ => panic!("Expected integer value"), + }; + assert_eq!(-1, val); + } +} + +#[tokio::test] +async fn modify_subscription() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .value(-1) + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let (notifs, _data, _) = ChannelNotifications::new(); + + // Create a subscription + let sub_id = session + .create_subscription(Duration::from_millis(100), 100, 20, 1000, 0, true, notifs) + .await + .unwrap(); + + // Create a monitored item on that subscription + let res = session + .create_monitored_items( + sub_id, + TimestampsToReturn::Both, + vec![MonitoredItemCreateRequest { + item_to_monitor: ReadValueId { + node_id: id.clone(), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + monitoring_mode: opcua::types::MonitoringMode::Reporting, + requested_parameters: MonitoringParameters { + sampling_interval: 0.0, + queue_size: 10, + discard_oldest: true, + ..Default::default() + }, + }], + ) + .await + .unwrap(); + assert_eq!(res.len(), 1); + let it = &res[0]; + assert_eq!(it.status_code, StatusCode::Good); + let monitored_item_id = it.monitored_item_id; + + let session_id = session.server_session_id(); + println!("{session_id:?}"); + let opcua::types::Identifier::Numeric(session_id_num) = &session_id.identifier else { + panic!("Expected numeric session ID"); + }; + let sess_subs = 
tester + .handle + .subscriptions() + .get_session_subscriptions(*session_id_num) + .unwrap(); + { + let lck = sess_subs.lock(); + let sub = lck.get(sub_id).unwrap(); + + assert_eq!(sub.len(), 1); + assert_eq!(sub.publishing_interval(), Duration::from_millis(100)); + assert_eq!(sub.priority(), 0); + assert!(sub.publishing_enabled()); + assert_eq!(sub.max_notifications_per_publish(), 1000); + + let item = sub.get(&monitored_item_id).unwrap(); + assert_eq!(id, item.item_to_monitor().node_id); + assert_eq!(MonitoringMode::Reporting, item.monitoring_mode()); + assert_eq!(100.0, item.sampling_interval()); + assert_eq!(10, item.queue_size()); + assert!(item.discard_oldest()); + } + + // Modify the subscription, we're mostly just checking that nothing blows up here. + session + .modify_subscription(sub_id, Duration::from_millis(200), 100, 20, 500, 1) + .await + .unwrap(); + + // Modify the monitored item + session + .modify_monitored_items( + sub_id, + TimestampsToReturn::Both, + &[MonitoredItemModifyRequest { + monitored_item_id, + requested_parameters: MonitoringParameters { + sampling_interval: 200.0, + queue_size: 5, + discard_oldest: false, + ..Default::default() + }, + }], + ) + .await + .unwrap(); + + { + let lck = sess_subs.lock(); + let sub = lck.get(sub_id).unwrap(); + + assert_eq!(sub.len(), 1); + assert_eq!(sub.publishing_interval(), Duration::from_millis(200)); + assert_eq!(sub.priority(), 1); + assert!(sub.publishing_enabled()); + assert_eq!(sub.max_notifications_per_publish(), 500); + + let item = sub.get(&monitored_item_id).unwrap(); + assert_eq!(id, item.item_to_monitor().node_id); + assert_eq!(MonitoringMode::Reporting, item.monitoring_mode()); + assert_eq!(200.0, item.sampling_interval()); + assert_eq!(5, item.queue_size()); + assert!(!item.discard_oldest()); + } + + // Disable publishing + session.set_publishing_mode(&[sub_id], false).await.unwrap(); + + // Set monitoring mode to sampling + session + .set_monitoring_mode(sub_id, 
MonitoringMode::Sampling, &[monitored_item_id]) + .await + .unwrap(); + + { + let lck = sess_subs.lock(); + let sub = lck.get(sub_id).unwrap(); + + assert_eq!(sub.len(), 1); + assert_eq!(sub.publishing_interval(), Duration::from_millis(200)); + assert_eq!(sub.priority(), 1); + assert!(!sub.publishing_enabled()); + assert_eq!(sub.max_notifications_per_publish(), 500); + + let item = sub.get(&monitored_item_id).unwrap(); + assert_eq!(id, item.item_to_monitor().node_id); + assert_eq!(MonitoringMode::Sampling, item.monitoring_mode()); + assert_eq!(200.0, item.sampling_interval()); + assert_eq!(5, item.queue_size()); + assert!(!item.discard_oldest()); + } + + // Delete monitored item + session + .delete_monitored_items(sub_id, &[monitored_item_id]) + .await + .unwrap(); + + // Delete subscription + session.delete_subscription(sub_id).await.unwrap(); +} + +#[tokio::test] +async fn subscription_limits() { + let (tester, _nm, session) = setup().await; + + let limit = tester + .handle + .info() + .config + .limits + .subscriptions + .max_subscriptions_per_session; + let (notifs, _data, _) = ChannelNotifications::new(); + let mut subs = Vec::new(); + // Create too many subscriptions + for _ in 0..limit { + subs.push( + session + .create_subscription( + Duration::from_secs(1), + 100, + 20, + 1000, + 0, + true, + notifs.clone(), + ) + .await + .unwrap(), + ) + } + let e = session + .create_subscription(Duration::from_secs(1), 100, 20, 1000, 0, true, notifs) + .await + .unwrap_err(); + assert_eq!(StatusCode::BadTooManySubscriptions, e); + for sub in subs.iter().skip(1) { + session.delete_subscription(*sub).await.unwrap(); + } + + let sub = subs[0]; + + // Monitored items. 
+ let limits = tester + .handle + .info() + .config + .limits + .operational + .max_monitored_items_per_call; + + // Create zero + let e = session + .create_monitored_items(sub, TimestampsToReturn::Both, vec![]) + .await + .unwrap_err(); + assert_eq!(StatusCode::BadNothingToDo, e); + + // Create too many + let e = session + .create_monitored_items( + sub, + TimestampsToReturn::Both, + (0..(limits + 1)) + .map(|i| MonitoredItemCreateRequest { + item_to_monitor: ReadValueId { + node_id: NodeId::new(2, i as i32), + attribute_id: AttributeId::Value as u32, + ..Default::default() + }, + monitoring_mode: MonitoringMode::Reporting, + requested_parameters: MonitoringParameters { + client_handle: i as u32, + sampling_interval: 100.0, + ..Default::default() + }, + }) + .collect(), + ) + .await + .unwrap_err(); + assert_eq!(e, StatusCode::BadTooManyOperations); +} + +// TODO: Add tests for transfer subscriptions and more detailed high level tests on +// subscriptions. Would be much easier to do with a low-level client. 
diff --git a/lib/tests/utils/mod.rs b/lib/tests/utils/mod.rs new file mode 100644 index 000000000..dd90ee727 --- /dev/null +++ b/lib/tests/utils/mod.rs @@ -0,0 +1,37 @@ +mod node_manager; +mod tester; + +pub const CLIENT_USERPASS_ID: &str = "sample1"; +pub const CLIENT_X509_ID: &str = "x509"; + +pub use node_manager::*; +use opcua::types::{AttributeId, DataValue, NodeId, ReadValueId, Variant}; +pub use tester::*; + +#[allow(unused)] +pub fn read_value_id(attribute: AttributeId, id: impl Into) -> ReadValueId { + let node_id = id.into(); + ReadValueId { + node_id, + attribute_id: attribute as u32, + ..Default::default() + } +} + +#[allow(unused)] +pub fn read_value_ids(attributes: &[AttributeId], id: impl Into) -> Vec { + let node_id = id.into(); + attributes + .iter() + .map(|a| read_value_id(*a, &node_id)) + .collect() +} + +#[allow(unused)] +pub fn array_value(v: &DataValue) -> &Vec { + let v = match v.value.as_ref().unwrap() { + Variant::Array(a) => a, + _ => panic!("Expected array"), + }; + &v.values +} diff --git a/lib/tests/utils/node_manager.rs b/lib/tests/utils/node_manager.rs new file mode 100644 index 000000000..f62cf7976 --- /dev/null +++ b/lib/tests/utils/node_manager.rs @@ -0,0 +1,1018 @@ +use std::{collections::HashMap, sync::atomic::AtomicU32}; + +use async_trait::async_trait; +use opcua::{ + server::{ + address_space::{ + new_node_from_attributes, AddressSpace, HasNodeId, NodeType, ReferenceDirection, + }, + node_manager::{ + add_namespaces, get_node_metadata, + memory::{ + InMemoryNodeManager, InMemoryNodeManagerBuilder, InMemoryNodeManagerImpl, + NamespaceMetadata, + }, + AddNodeItem, AddReferenceItem, DeleteNodeItem, DeleteReferenceItem, HistoryNode, + HistoryUpdateNode, MethodCall, MonitoredItemRef, MonitoredItemUpdateRef, + NodeManagerBuilder, NodeManagersRef, ParsedReadValueId, RequestContext, ServerContext, + TypeTree, TypeTreeNode, WriteNode, + }, + ContinuationPoint, CreateMonitoredItem, + }, + sync::{Mutex, RwLock}, + trace_read_lock, 
trace_write_lock, + types::{ + AttributeId, DataValue, DateTime, ExpandedNodeId, MonitoringMode, NodeClass, NodeId, + PerformUpdateType, QualifiedName, ReadRawModifiedDetails, ReferenceTypeId, StatusCode, + TimestampsToReturn, Variant, + }, +}; + +#[allow(unused)] +pub type TestNodeManager = InMemoryNodeManager; + +#[derive(Default, Debug)] +pub struct HistoryData { + // Must be ordered chronologically. + values: Vec, +} + +struct HistoryContinuationPoint { + index: usize, +} + +pub struct TestNodeManagerImpl { + // In practice you would never store history data in memory, and you would not want + // a single global lock on all history. + history_data: RwLock>, + call_info: Mutex, + method_cbs: Mutex< + HashMap< + NodeId, + Box Result, StatusCode> + Send + Sync + 'static>, + >, + >, + node_id_generator: AtomicU32, + namespace_index: u16, + node_managers: NodeManagersRef, +} + +/// Information about calls made to the node manager impl, for verifying in tests. +#[derive(Default)] +pub struct CallInfo { + pub value_monitored_items: Vec, + pub read_values: Vec, + pub register_nodes: Vec, + pub set_monitoring_mode: Vec, + pub modify_monitored_items: Vec, + pub event_monitored_items: Vec, + pub delete_monitored_items: Vec, + pub unregister_nodes: Vec, + pub history_read_raw_modified: Vec, + pub history_update: Vec, + pub write: Vec<(NodeId, AttributeId)>, + pub call: Vec, + pub add_nodes: Vec, + pub add_references: Vec<(NodeId, NodeId, NodeId)>, + pub delete_nodes: Vec, + pub delete_references: Vec<(NodeId, NodeId, NodeId)>, +} + +pub fn test_node_manager() -> impl NodeManagerBuilder { + InMemoryNodeManagerBuilder::new(make_test_node_manager_impl) +} + +fn make_test_node_manager_impl( + context: ServerContext, + address_space: &mut AddressSpace, +) -> TestNodeManagerImpl { + let idx = add_namespaces(&context, address_space, &["urn:rustopcuatestserver"])[0]; + TestNodeManagerImpl::new(idx, context.node_managers.clone()) +} + +#[async_trait] +impl InMemoryNodeManagerImpl 
for TestNodeManagerImpl { + async fn init(&self, _address_space: &mut AddressSpace, _context: ServerContext) {} + + fn namespaces(&self) -> Vec { + vec![NamespaceMetadata { + is_namespace_subset: Some(false), + namespace_uri: "urn:rustopcuatestserver".to_owned(), + namespace_index: self.namespace_index, + ..Default::default() + }] + } + + fn name(&self) -> &str { + "test" + } + + fn handle_new_node(&self, parent_id: &ExpandedNodeId) -> bool { + // Let this node manager handle all new nodes without a specified node id. + parent_id.server_index == 0 + } + + async fn history_read_raw_modified( + &self, + _context: &RequestContext, + details: &ReadRawModifiedDetails, + nodes: &mut [&mut &mut HistoryNode], + _timestamps_to_return: TimestampsToReturn, + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for node in nodes.iter() { + call_info + .history_read_raw_modified + .push(node.node_id().clone()); + } + } + self.history_read_raw_modified(details, nodes); + Ok(()) + } + + async fn read_values( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes: &[&ParsedReadValueId], + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> Vec { + { + let mut call_info = self.call_info.lock(); + for node in nodes.iter() { + call_info.read_values.push(node.node_id.clone()); + } + } + + let address_space = address_space.read(); + nodes + .iter() + .map(|n| address_space.read(context, n, max_age, timestamps_to_return)) + .collect() + } + + async fn create_value_monitored_items( + &self, + context: &RequestContext, + address_space: &RwLock, + items: &mut [&mut &mut CreateMonitoredItem], + ) { + { + let mut call_info = self.call_info.lock(); + for node in items.iter() { + call_info + .value_monitored_items + .push(node.item_to_monitor().node_id.clone()); + } + } + let to_read: Vec<_> = items.iter().map(|r| r.item_to_monitor()).collect(); + let values = self + .read_values( + context, + address_space, + &to_read, + 0.0, + 
TimestampsToReturn::Both, + ) + .await; + + for (value, node) in values.into_iter().zip(items.into_iter()) { + if value.status() != StatusCode::BadAttributeIdInvalid { + node.set_initial_value(value); + } + node.set_status(StatusCode::Good); + } + } + + async fn create_event_monitored_items( + &self, + _context: &RequestContext, + _address_space: &RwLock, + items: &mut [&mut &mut CreateMonitoredItem], + ) { + let mut call_info = self.call_info.lock(); + for node in items.iter() { + call_info + .event_monitored_items + .push(node.item_to_monitor().node_id.clone()); + } + } + + async fn set_monitoring_mode( + &self, + _context: &RequestContext, + _mode: MonitoringMode, + items: &[&MonitoredItemRef], + ) { + let mut call_info = self.call_info.lock(); + for it in items.iter() { + call_info.event_monitored_items.push(it.node_id().clone()); + } + } + + async fn modify_monitored_items( + &self, + _context: &RequestContext, + items: &[&MonitoredItemUpdateRef], + ) { + let mut call_info = self.call_info.lock(); + for it in items.iter() { + call_info.modify_monitored_items.push(it.node_id().clone()); + } + } + + async fn delete_monitored_items(&self, _context: &RequestContext, items: &[&MonitoredItemRef]) { + let mut call_info = self.call_info.lock(); + for it in items.iter() { + call_info.delete_monitored_items.push(it.node_id().clone()); + } + } + + async fn unregister_nodes( + &self, + _context: &RequestContext, + _address_space: &RwLock, + nodes: &[&NodeId], + ) -> Result<(), StatusCode> { + let mut call_info = self.call_info.lock(); + for id in nodes { + call_info.unregister_nodes.push((*id).clone()); + } + Ok(()) + } + + async fn history_update( + &self, + _context: &RequestContext, + nodes: &mut [&mut &mut HistoryUpdateNode], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for node in nodes.iter() { + call_info.history_update.push(match node.details() { + opcua::server::node_manager::HistoryUpdateDetails::UpdateData(d) => { + 
d.node_id.clone() + } + opcua::server::node_manager::HistoryUpdateDetails::UpdateStructureData(d) => { + d.node_id.clone() + } + opcua::server::node_manager::HistoryUpdateDetails::UpdateEvent(d) => { + d.node_id.clone() + } + opcua::server::node_manager::HistoryUpdateDetails::DeleteRawModified(d) => { + d.node_id.clone() + } + opcua::server::node_manager::HistoryUpdateDetails::DeleteAtTime(d) => { + d.node_id.clone() + } + opcua::server::node_manager::HistoryUpdateDetails::DeleteEvent(d) => { + d.node_id.clone() + } + }); + } + } + + for node in nodes.into_iter() { + self.history_update_node(node)?; + } + + Ok(()) + } + + async fn write( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_write: &mut [&mut WriteNode], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for node in nodes_to_write.iter() { + call_info + .write + .push((node.value().node_id.clone(), node.value().attribute_id)); + } + } + let mut address_space = trace_write_lock!(address_space); + let type_tree = trace_read_lock!(context.type_tree); + + for write in nodes_to_write { + let node = match address_space.validate_node_write(context, write.value(), &type_tree) { + Ok(v) => v, + Err(e) => { + write.set_status(e); + continue; + } + }; + + if matches!(write.value().attribute_id, AttributeId::Value) + && node.node_class() == NodeClass::Variable + { + let NodeType::Variable(var) = node else { + write.set_status(StatusCode::BadAttributeIdInvalid); + continue; + }; + if let Err(e) = var.set_value( + write.value().index_range.clone(), + write.value().value.value.clone().unwrap_or(Variant::Empty), + ) { + write.set_status(e); + continue; + } + + if var.historizing() { + let mut history_data = trace_write_lock!(self.history_data); + let values = history_data + .entry(write.value().node_id.clone()) + .or_default(); + values.values.push(var.value( + TimestampsToReturn::Both, + opcua::types::NumericRange::None, + &QualifiedName::null(), + 0.0, + )); + } 
+ } else { + if let Err(e) = node.as_mut_node().set_attribute( + write.value().attribute_id, + write.value().value.value.clone().unwrap_or(Variant::Empty), + ) { + write.set_status(e); + continue; + } + } + + write.set_status(StatusCode::Good); + + // This is a little lazy, ideally avoid calling this method in a loop, instead create an iterator + // over values. + context.subscriptions.notify_data_change( + [( + write.value().value.clone(), + &write.value().node_id, + write.value().attribute_id, + )] + .into_iter(), + ); + } + + Ok(()) + } + + async fn call( + &self, + _context: &RequestContext, + _address_space: &RwLock, + methods_to_call: &mut [&mut &mut MethodCall], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for m in methods_to_call.iter() { + call_info.call.push(m.method_id().clone()); + } + } + + let mut cbs = self.method_cbs.lock(); + for method in methods_to_call { + let Some(cb) = cbs.get_mut(method.method_id()) else { + method.set_status(StatusCode::BadMethodInvalid); + continue; + }; + let res = (*cb)(method.arguments()); + match res { + Ok(r) => { + method.set_outputs(r); + method.set_status(StatusCode::Good); + } + Err(e) => method.set_status(e), + } + } + + Ok(()) + } + + async fn add_nodes( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_add: &mut [&mut AddNodeItem], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for m in nodes_to_add.iter() { + call_info.add_nodes.push(m.browse_name().name.to_string()); + } + } + let parent_ids: Vec<_> = nodes_to_add + .iter() + .map(|n| n.parent_node_id().node_id.clone()) + .collect(); + let parent_nodes = get_node_metadata(context, &self.node_managers, &parent_ids).await; + + let mut address_space = trace_write_lock!(address_space); + let mut type_tree = trace_write_lock!(context.type_tree); + for (idx, node) in nodes_to_add.into_iter().enumerate() { + let node_id = if node.requested_new_node_id().is_null() { + 
self.next_node_id() + } else { + node.requested_new_node_id().clone() + }; + + if address_space.node_exists(&node_id) { + node.set_result(NodeId::null(), StatusCode::BadNodeIdExists); + continue; + } + + let Some(Some(parent)) = parent_nodes.get(idx) else { + node.set_result(NodeId::null(), StatusCode::BadParentNodeIdInvalid); + continue; + }; + + let mut refs = Vec::new(); + + // Valid the type definition + if !node.type_definition_id().is_null() { + let Some(ty) = type_tree.get(&node.type_definition_id().node_id) else { + node.set_result(NodeId::null(), StatusCode::BadTypeDefinitionInvalid); + continue; + }; + + let valid = match node.node_class() { + opcua::types::NodeClass::Object => ty == NodeClass::ObjectType, + opcua::types::NodeClass::Variable => ty == NodeClass::VariableType, + _ => false, + }; + + if !valid { + node.set_result(NodeId::null(), StatusCode::BadTypeDefinitionInvalid); + continue; + } + + let ref_type_id = ReferenceTypeId::HasTypeDefinition.into(); + + refs.push(( + &node.type_definition_id().node_id, + ref_type_id, + ReferenceDirection::Forward, + )); + } + + if !matches!( + type_tree.get(node.reference_type_id()), + Some(NodeClass::ReferenceType) + ) { + node.set_result(NodeId::null(), StatusCode::BadReferenceTypeIdInvalid); + continue; + } + + let is_type = matches!( + node.node_class(), + NodeClass::DataType + | NodeClass::ObjectType + | NodeClass::ReferenceType + | NodeClass::VariableType + ); + + // There are restrictions on where and how types may be added. + if is_type { + // The parent is a type of the same kind. + let valid = type_tree + .get(&parent.node_id.node_id) + .is_some_and(|nc| nc == node.node_class()); + if !valid { + node.set_result(NodeId::null(), StatusCode::BadParentNodeIdInvalid); + continue; + } + } + + refs.push(( + &parent.node_id.node_id, + node.reference_type_id().clone(), + ReferenceDirection::Inverse, + )); + // Technically node managers are supposed to create all nodes required by the type definition here. 
+ // In practice this is very server dependent, no servers allow you to create arbitrary nodes. + // For now we just ignore this requirement. + + let res = new_node_from_attributes( + node_id.clone(), + node.browse_name().clone(), + node.node_class(), + node.node_attributes().clone(), + ); + + match res { + Ok(n) => self.insert_node_inner( + &mut address_space, + &mut type_tree, + n, + &parent.node_id.node_id, + refs, + ), + Err(e) => { + node.set_result(NodeId::null(), e); + continue; + } + } + + node.set_result(node_id, StatusCode::Good); + } + + Ok(()) + } + + async fn add_references( + &self, + context: &RequestContext, + address_space: &RwLock, + references_to_add: &mut [&mut AddReferenceItem], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for m in references_to_add.iter() { + call_info.add_references.push(( + m.source_node_id().clone(), + m.reference_type_id().clone(), + m.target_node_id().node_id.clone(), + )); + } + } + let node_pairs: Vec<_> = references_to_add + .iter() + .flat_map(|n| { + [ + n.source_node_id().clone(), + n.target_node_id().node_id.clone(), + ] + .into_iter() + }) + .collect(); + let nodes = get_node_metadata(context, &self.node_managers, &node_pairs).await; + let mut address_space = trace_write_lock!(address_space); + let type_tree = trace_read_lock!(context.type_tree); + for (idx, rf) in references_to_add.iter_mut().enumerate() { + let Some(Some(start_node)) = nodes.get(idx * 2) else { + if rf.source_node_id().namespace == self.namespace_index { + rf.set_source_result(StatusCode::BadSourceNodeIdInvalid); + } + continue; + }; + let Some(Some(end_node)) = nodes.get(idx * 2 + 1) else { + if rf.target_node_id().node_id.namespace == self.namespace_index { + rf.set_target_result(StatusCode::BadSourceNodeIdInvalid); + } + continue; + }; + + if !type_tree + .get(rf.reference_type_id()) + .is_some_and(|nc| nc == NodeClass::ReferenceType) + { + if rf.source_node_id().namespace == self.namespace_index { + 
rf.set_source_result(StatusCode::BadReferenceTypeIdInvalid); + } + if rf.target_node_id().node_id.namespace == self.namespace_index { + rf.set_target_result(StatusCode::BadReferenceTypeIdInvalid); + } + } + + // Most node managers will do a lot of validation here, to prevent cycles, + // and make sure the reference is used correctly. + if rf.is_forward() { + address_space.insert_reference( + &start_node.node_id.node_id, + &end_node.node_id.node_id, + rf.reference_type_id(), + ) + } else { + address_space.insert_reference( + &end_node.node_id.node_id, + &start_node.node_id.node_id, + rf.reference_type_id(), + ) + } + + if rf.source_node_id().namespace == self.namespace_index { + rf.set_source_result(StatusCode::Good); + } + if rf.target_node_id().node_id.namespace == self.namespace_index { + rf.set_target_result(StatusCode::Good); + } + } + + Ok(()) + } + + async fn delete_nodes( + &self, + context: &RequestContext, + address_space: &RwLock, + nodes_to_delete: &mut [&mut DeleteNodeItem], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for m in nodes_to_delete.iter() { + call_info.delete_nodes.push(m.node_id().clone()); + } + } + + let mut address_space = trace_write_lock!(address_space); + let mut type_tree = trace_write_lock!(context.type_tree); + for node in nodes_to_delete { + if address_space + .delete(node.node_id(), node.delete_target_references()) + .is_none() + { + node.set_result(StatusCode::BadNodeIdInvalid); + continue; + } + + type_tree.remove(node.node_id()); + node.set_result(StatusCode::Good); + } + + Ok(()) + } + + async fn delete_references( + &self, + _context: &RequestContext, + address_space: &RwLock, + references_to_delete: &mut [&mut DeleteReferenceItem], + ) -> Result<(), StatusCode> { + { + let mut call_info = self.call_info.lock(); + for m in references_to_delete.iter() { + call_info.delete_references.push(( + m.source_node_id().clone(), + m.reference_type_id().clone(), + m.target_node_id().node_id.clone(), 
+ )); + } + } + + let mut address_space = trace_write_lock!(address_space); + for rf in references_to_delete { + if !address_space.delete_reference( + rf.source_node_id(), + &rf.target_node_id().node_id, + rf.reference_type_id(), + ) { + if rf.source_node_id().namespace == self.namespace_index { + rf.set_source_result(StatusCode::BadNodeIdInvalid); + } + if rf.target_node_id().node_id.namespace == self.namespace_index { + rf.set_target_result(StatusCode::BadNodeIdInvalid); + } + + continue; + } + + if rf.source_node_id().namespace == self.namespace_index { + rf.set_source_result(StatusCode::Good); + } + if rf.target_node_id().node_id.namespace == self.namespace_index { + rf.set_target_result(StatusCode::Good); + } + } + + Ok(()) + } +} + +struct RawValue { + value: Variant, + timestamp: DateTime, + status: StatusCode, + orig_idx: usize, +} + +impl TestNodeManagerImpl { + #[allow(unused)] + pub fn new(namespace_index: u16, node_managers: NodeManagersRef) -> Self { + Self { + history_data: Default::default(), + call_info: Default::default(), + method_cbs: Default::default(), + node_id_generator: AtomicU32::new(1), + namespace_index, + node_managers, + } + } + + #[allow(unused)] + pub fn add_method_cb( + &self, + node_id: NodeId, + cb: impl FnMut(&[Variant]) -> Result, StatusCode> + Send + Sync + 'static, + ) { + let mut cbs = self.method_cbs.lock(); + cbs.insert(node_id, Box::new(cb)); + } + + fn history_read_raw_modified( + &self, + details: &ReadRawModifiedDetails, + nodes: &mut [&mut &mut HistoryNode], + ) { + let is_forward = match (details.start_time.is_null(), details.end_time.is_null()) { + (true, true) => true, + (true, false) => true, + (false, true) => false, + (false, false) => details.start_time < details.end_time, + }; + + let per_node = if details.num_values_per_node == 0 { + 10_000 + } else { + details.num_values_per_node.min(10_000) + } as usize; + + let history = trace_read_lock!(self.history_data); + + // At this point all nodes are validated to be 
history enabled + for node in nodes { + let Some(data) = history.get(node.node_id()) else { + node.set_status(StatusCode::Good); + node.set_result(&opcua::types::HistoryData { + data_values: Some(Vec::new()), + }); + continue; + }; + + let start_time = details.start_time.checked_ticks(); + let end_time = details.end_time.checked_ticks(); + + // Compute start index. If a continuation point is specified the server + // is allowed to just ignore the `details` parameter. See 11.6.3 + let start_index = if let Some(cp) = node.continuation_point() { + let Some(cp) = cp.get::() else { + node.set_status(StatusCode::BadContinuationPointInvalid); + continue; + }; + // Technically using a different set of details for a read with continuation point is invalid. + // We _could_ validate this here, but we don't have to. + cp.index + } else { + // Find where we should start reading. + let time = if is_forward { start_time } else { end_time }; + let r = data.values.binary_search_by(|v| { + let ticks = v + .source_timestamp + .as_ref() + .map(|v| v.checked_ticks()) + .unwrap_or_default(); + ticks.cmp(&time) + }); + // If OK, this is the index of the value that matched. + // Otherwise it will be the index _after_, which is correct for reading forward. + match r { + Ok(idx) => idx, + Err(idx) => idx, + } + }; + + // Note the behavior here. For forward reads, start_index is the _next_ value we will read, + // i.e. if the start_index is 1, we skip 1 node (index 0), and begin reading from node at index 1. + // For backward reads, it's the index of the _last_ value read, or completely outside the history data. 
+ let values: Vec<_> = if is_forward { + data.values + .iter() + .skip(start_index) + .take(per_node) + .cloned() + .collect() + } else { + data.values + .iter() + .rev() + .skip(data.values.len() - start_index) + .take(per_node) + .cloned() + .collect() + }; + + node.set_status(StatusCode::Good); + node.set_result(&opcua::types::HistoryData { + data_values: Some(values), + }); + if is_forward { + let end_index = start_index.saturating_add(per_node); + if end_index < data.values.len() { + node.set_next_continuation_point(Some(ContinuationPoint::new(Box::new( + HistoryContinuationPoint { index: end_index }, + )))); + } + } else { + let end_index = start_index.saturating_sub(per_node); + if end_index > 0 { + node.set_next_continuation_point(Some(ContinuationPoint::new(Box::new( + HistoryContinuationPoint { index: end_index }, + )))); + } + }; + } + } + + fn history_update_node(&self, node: &mut HistoryUpdateNode) -> Result<(), StatusCode> { + let details = match node.details() { + opcua::server::node_manager::HistoryUpdateDetails::UpdateData(d) => d, + _ => return Err(StatusCode::BadHistoryOperationUnsupported), + }; + + if details.perform_insert_replace == PerformUpdateType::Remove { + return Err(StatusCode::BadInvalidArgument); + } + + let mut data = trace_write_lock!(self.history_data); + + let values = data.entry(details.node_id.clone()).or_default(); + + let mode = details.perform_insert_replace; + + // This is a little fiddly, it would be easy in an actually indexed store, + // but when keeping it just sequentially in memory it's a lot harder. + + // First, sort the values in ascending order. 
+ let ln = details + .update_values + .as_ref() + .map(|v| v.len()) + .unwrap_or_default(); + + let mut to_update = Vec::with_capacity(ln); + let mut results = vec![StatusCode::Good; ln]; + + for (idx, value) in details + .update_values + .clone() + .unwrap_or_default() + .into_iter() + .enumerate() + { + if let Some(v) = value.source_timestamp { + to_update.push(RawValue { + value: value.value.unwrap_or(Variant::Empty), + timestamp: v, + status: value.status.unwrap_or(StatusCode::Good), + orig_idx: idx, + }); + } else { + results[idx] = StatusCode::BadInvalidTimestamp; + } + } + to_update.sort_by(|a, b| a.timestamp.cmp(&b.timestamp)); + + let now = DateTime::now(); + let mut index = 0; + for value in to_update { + while index < values.values.len() + && values.values[index].source_timestamp.as_ref().unwrap() < &value.timestamp + { + index += 1; + } + let data_value = DataValue { + value: Some(value.value), + status: Some(value.status), + server_timestamp: Some(now), + source_timestamp: Some(value.timestamp), + ..Default::default() + }; + + if index < values.values.len() + && values.values[index].source_timestamp.as_ref().unwrap() == &value.timestamp + { + if mode == PerformUpdateType::Insert { + results[value.orig_idx] = StatusCode::BadEntryExists; + } else { + values.values.remove(index); + results[value.orig_idx] = StatusCode::GoodEntryReplaced; + values.values.insert(index, data_value); + } + } else { + if mode == PerformUpdateType::Replace { + results[value.orig_idx] = StatusCode::BadNoEntryExists; + } else { + results[value.orig_idx] = StatusCode::GoodEntryInserted; + values.values.insert(index, data_value); + } + } + } + + node.set_operation_results(Some(results)); + node.set_status(StatusCode::Good); + + Ok(()) + } + + pub fn next_node_id(&self) -> NodeId { + let val = self + .node_id_generator + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + NodeId::new(self.namespace_index, val) + } + + #[allow(unused)] + pub fn add_history(&self, node_id: &NodeId, 
values: impl Iterator) { + let mut hist = trace_write_lock!(self.history_data); + let data = hist.entry(node_id.clone()).or_default(); + + data.values.extend(values); + } + + #[allow(unused)] + pub fn add_node<'a>( + &self, + address_space: &RwLock, + type_tree: &RwLock, + node: NodeType, + parent_id: &'a NodeId, + reference_type_id: &'a NodeId, + type_def: Option<&'a NodeId>, + mut refs: Vec<(&'a NodeId, NodeId, ReferenceDirection)>, + ) { + if let Some(type_def) = type_def { + refs.push(( + type_def, + ReferenceTypeId::HasTypeDefinition.into(), + ReferenceDirection::Forward, + )); + } + refs.push(( + parent_id, + reference_type_id.clone(), + ReferenceDirection::Inverse, + )); + let mut address_space = trace_write_lock!(address_space); + let mut type_tree = trace_write_lock!(type_tree); + self.insert_node_inner(&mut address_space, &mut type_tree, node, parent_id, refs); + } + + #[allow(unused)] + pub fn add_references<'a>( + &self, + address_space: &RwLock, + source: &'a NodeId, + refs: Vec<(&'a NodeId, NodeId, ReferenceDirection)>, + ) { + let mut address_space = trace_write_lock!(address_space); + for (target, ty, dir) in refs { + if matches!(dir, ReferenceDirection::Forward) { + address_space.insert_reference(&source, target, ty); + } else { + address_space.insert_reference(target, &source, ty); + } + } + } + + fn insert_node_inner( + &self, + address_space: &mut AddressSpace, + type_tree: &mut TypeTree, + node: NodeType, + parent_id: &NodeId, + refs: Vec<(&NodeId, NodeId, ReferenceDirection)>, + ) { + let node_id = node.node_id().clone(); + let node_class = node.node_class(); + let browse_name = node.as_node().browse_name().clone(); + + address_space.insert(node, None::<&[(_, &NodeId, _)]>); + for (target, ty, dir) in refs { + if matches!(dir, ReferenceDirection::Forward) { + address_space.insert_reference(&node_id, target, ty); + } else { + address_space.insert_reference(target, &node_id, ty); + } + } + + let is_type = matches!( + node_class, + 
NodeClass::DataType + | NodeClass::ObjectType + | NodeClass::ReferenceType + | NodeClass::VariableType + ); + + // If the node is a new node in the type hierarchy, add it there. + if is_type { + type_tree.add_type_node(&node_id, &parent_id, node_class); + } else if let Some(type_node) = type_tree.get_node(&parent_id) { + let (browse_path, ty) = match type_node { + TypeTreeNode::Type(_) => (vec![browse_name.clone()], parent_id.clone()), + TypeTreeNode::Property(p) => ( + p.path + .iter() + .cloned() + .chain([browse_name.clone()].into_iter()) + .collect(), + p.type_id.clone(), + ), + }; + let path_ref: Vec<_> = browse_path.iter().collect(); + type_tree.add_type_property(&node_id, &ty, &path_ref, node_class); + } + } +} diff --git a/lib/tests/utils/tester.rs b/lib/tests/utils/tester.rs new file mode 100644 index 000000000..d6ace8356 --- /dev/null +++ b/lib/tests/utils/tester.rs @@ -0,0 +1,385 @@ +use std::{ + net::SocketAddr, + path::PathBuf, + sync::{ + atomic::{AtomicU16, Ordering}, + Arc, + }, + time::Duration, +}; + +use opcua::{ + client::{Client, ClientBuilder, IdentityToken, Session, SessionEventLoop}, + crypto::SecurityPolicy, + server::{ServerBuilder, ServerHandle, ServerUserToken, ANONYMOUS_USER_TOKEN_ID}, + types::{MessageSecurityMode, StatusCode}, +}; +use tokio::net::TcpListener; +use tokio_util::sync::{CancellationToken, DropGuard}; + +use super::{test_node_manager, TestNodeManager, CLIENT_USERPASS_ID, CLIENT_X509_ID}; + +pub struct Tester { + pub handle: ServerHandle, + pub client: Client, + _guard: DropGuard, + pub addr: SocketAddr, + pub test_id: u16, +} + +pub static TEST_COUNTER: AtomicU16 = AtomicU16::new(0); + +#[allow(unused)] +const USER_X509_CERTIFICATE_PATH: &str = "./tests/x509/user_cert.der"; +#[allow(unused)] +const USER_X509_PRIVATE_KEY_PATH: &str = "./tests/x509/user_private_key.pem"; + +pub fn hostname() -> String { + // To avoid certificate trouble, use the computer's own name for the endpoint + let mut names = 
opcua::crypto::X509Data::computer_hostnames(); + if names.is_empty() { + "localhost".to_string() + } else { + names.remove(0) + } +} + +#[allow(unused)] +pub async fn setup() -> (Tester, Arc, Arc) { + let server = test_server(); + let mut tester = Tester::new(server, false).await; + let nm = tester + .handle + .node_managers() + .get_of_type::() + .unwrap(); + let (session, lp) = tester.connect_default().await.unwrap(); + lp.spawn(); + tokio::time::timeout(Duration::from_secs(2), session.wait_for_connection()) + .await + .unwrap(); + + (tester, nm, session) +} + +#[allow(unused)] +pub fn client_user_token() -> IdentityToken { + IdentityToken::UserName( + CLIENT_USERPASS_ID.to_owned(), + format!("{CLIENT_USERPASS_ID}_password"), + ) +} + +#[allow(unused)] +pub fn client_x509_token() -> IdentityToken { + IdentityToken::X509( + PathBuf::from(USER_X509_CERTIFICATE_PATH), + PathBuf::from(USER_X509_PRIVATE_KEY_PATH), + ) +} + +pub fn default_server() -> ServerBuilder { + let endpoint_path = "/"; + let user_token_ids = vec![ANONYMOUS_USER_TOKEN_ID, CLIENT_USERPASS_ID, CLIENT_X509_ID]; + let mut builder = ServerBuilder::new() + .application_name("integration_server") + .application_uri("urn:integration_server") + .product_uri("urn:integration_server Testkit") + .create_sample_keypair(true) + .host(hostname()) + .trust_client_certs(true) + .add_user_token( + CLIENT_USERPASS_ID, + ServerUserToken::user_pass( + CLIENT_USERPASS_ID, + &format!("{CLIENT_USERPASS_ID}_password"), + ), + ) + .add_user_token( + CLIENT_X509_ID, + ServerUserToken::x509(CLIENT_X509_ID, &PathBuf::from(USER_X509_CERTIFICATE_PATH)), + ) + .add_endpoint( + "none", + ( + endpoint_path, + SecurityPolicy::None, + MessageSecurityMode::None, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic128rsa15_sign", + ( + endpoint_path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic128rsa15_sign_encrypt", + ( + endpoint_path, + 
SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic256_sign", + ( + endpoint_path, + SecurityPolicy::Basic256, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic256_sign_encrypt", + ( + endpoint_path, + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic256sha256_sign", + ( + endpoint_path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "basic256sha256_sign_encrypt", + ( + endpoint_path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "endpoint_aes128sha256rsaoaep_sign", + ( + endpoint_path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "endpoint_aes128sha256rsaoaep_sign_encrypt", + ( + endpoint_path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "endpoint_aes256sha256rsapss_sign", + ( + endpoint_path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::Sign, + &user_token_ids as &[&str], + ), + ) + .add_endpoint( + "endpoint_aes256sha256rsapss_sign_encrypt", + ( + endpoint_path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids as &[&str], + ), + ); + + let limits = builder.limits_mut(); + limits.max_message_size = 1024 * 1024 * 64; + limits.max_array_length = 100_000; + limits.subscriptions.max_queued_notifications = 200; + + builder +} + +pub fn default_client(test_id: u16, quick_timeout: bool) -> ClientBuilder { + let client = ClientBuilder::new() + .application_name("integration_client") + .application_uri("x") + .pki_dir(format!("./pki-client/{test_id}")) + .create_sample_keypair(true) 
+ .trust_server_certs(true) + .session_retry_initial(Duration::from_millis(200)) + .max_array_length(100_000) + .max_message_size(1024 * 1024 * 64); + let client = if quick_timeout { + client.session_retry_limit(1) + } else { + client + }; + client +} + +#[allow(unused)] +pub fn test_server() -> ServerBuilder { + default_server().with_node_manager(test_node_manager()) +} + +impl Tester { + async fn listener() -> TcpListener { + TcpListener::bind(format!("{}:0", hostname())) + .await + .unwrap() + } + + #[allow(unused)] + pub async fn new_default_server(quick_timeout: bool) -> Self { + opcua::console_logging::init(); + + let test_id = TEST_COUNTER.fetch_add(1, Ordering::Relaxed); + let listener = Self::listener().await; + let addr = listener.local_addr().unwrap(); + + let server = default_server() + .discovery_urls(vec![format!("opc.tcp://{}:{}", hostname(), addr.port())]) + .pki_dir(format!("./pki-server/{test_id}")); + + let (server, handle) = server.build().unwrap(); + let token = CancellationToken::new(); + + tokio::task::spawn(server.run_with(listener)); + + let client = default_client(test_id, quick_timeout).client().unwrap(); + + Self { + _guard: handle.token().clone().drop_guard(), + handle, + client, + addr, + test_id, + } + } + + #[allow(unused)] + pub async fn new(server: ServerBuilder, quick_timeout: bool) -> Self { + opcua::console_logging::init(); + + let test_id = TEST_COUNTER.fetch_add(1, Ordering::Relaxed); + let listener = Self::listener().await; + let addr = listener.local_addr().unwrap(); + + let server = server + .pki_dir(format!("./pki-server/{test_id}")) + .discovery_urls(vec![format!("opc.tcp://{}:{}", hostname(), addr.port())]); + + let (server, handle) = server.build().unwrap(); + + tokio::task::spawn(server.run_with(listener)); + + let client = default_client(test_id, quick_timeout).client().unwrap(); + + Self { + _guard: handle.token().clone().drop_guard(), + handle, + client, + addr, + test_id, + } + } + + #[allow(unused)] + pub async fn 
new_custom_client(server: ServerBuilder, client: ClientBuilder) -> Self { + opcua::console_logging::init(); + + let test_id = TEST_COUNTER.fetch_add(1, Ordering::Relaxed); + let listener = Self::listener().await; + let token = CancellationToken::new(); + let addr = listener.local_addr().unwrap(); + + let server = server + .pki_dir(format!("./pki-server/{test_id}")) + .discovery_urls(vec![format!("opc.tcp://{}:{}", hostname(), addr.port())]); + let client = client.pki_dir(format!("./pki-client/{test_id}")); + + let (server, handle) = server.build().unwrap(); + + tokio::task::spawn(server.run_with(listener)); + + let client = client.client().unwrap(); + + Self { + _guard: handle.token().clone().drop_guard(), + handle, + client, + addr, + test_id, + } + } + + pub async fn connect( + &mut self, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + user_identity: IdentityToken, + ) -> Result<(Arc, SessionEventLoop), StatusCode> { + self.client + .new_session_from_endpoint( + ( + &self.endpoint() as &str, + security_policy.to_str(), + security_mode, + ), + user_identity, + ) + .await + } + + #[allow(unused)] + pub async fn connect_and_wait( + &mut self, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + user_identity: IdentityToken, + ) -> Result, StatusCode> { + let (session, evt_loop) = self + .client + .new_session_from_endpoint( + ( + &self.endpoint() as &str, + security_policy.to_str(), + security_mode, + ), + user_identity, + ) + .await?; + + evt_loop.spawn(); + + tokio::time::timeout(Duration::from_millis(10_000), session.wait_for_connection()) + .await + .unwrap(); + + Ok(session) + } + + #[allow(unused)] + pub async fn connect_default( + &mut self, + ) -> Result<(Arc, SessionEventLoop), StatusCode> { + self.connect( + SecurityPolicy::None, + MessageSecurityMode::None, + IdentityToken::Anonymous, + ) + .await + } + + pub fn endpoint(&self) -> String { + format!("opc.tcp://{}:{}/", hostname(), self.addr.port()) + } +} 
diff --git a/lib/tests/write.rs b/lib/tests/write.rs new file mode 100644 index 000000000..3991f71d1 --- /dev/null +++ b/lib/tests/write.rs @@ -0,0 +1,879 @@ +use chrono::TimeDelta; +use opcua::{ + client::{HistoryReadAction, HistoryUpdateAction, Session}, + server::address_space::{ + AccessLevel, DataTypeBuilder, EventNotifier, MethodBuilder, NodeType, ObjectBuilder, + ObjectTypeBuilder, ReferenceTypeBuilder, UserAccessLevel, VariableBuilder, + VariableTypeBuilder, ViewBuilder, + }, + types::{ + AttributeId, ByteString, DataTypeId, DataValue, DateTime, HistoryData, HistoryReadValueId, + LocalizedText, NodeId, ObjectId, ObjectTypeId, QualifiedName, ReadRawModifiedDetails, + ReferenceTypeId, StatusCode, TimestampsToReturn, UAString, UpdateDataDetails, + VariableTypeId, Variant, WriteMask, WriteValue, + }, +}; +// Write is not implemented in the core library itself, only in the test node manager, +// we still test here to test write functionality in the address space. +use utils::{array_value, read_value_id, setup}; + +mod utils; + +fn write_value( + attribute_id: AttributeId, + value: impl Into, + node_id: impl Into, +) -> WriteValue { + WriteValue { + value: DataValue { + value: Some(value.into()), + status: Some(StatusCode::Good), + source_timestamp: Some(DateTime::now()), + ..Default::default() + }, + node_id: node_id.into(), + attribute_id: attribute_id as u32, + index_range: UAString::null(), + } +} + +async fn write_then_read(session: &Session, values: &[WriteValue]) { + let r = session.write(values).await.unwrap(); + assert_eq!(r.len(), values.len()); + for s in r { + assert_eq!(s, StatusCode::Good); + } + + let reads: Vec<_> = values + .iter() + .map(|r| read_value_id(AttributeId::from_u32(r.attribute_id).unwrap(), &r.node_id)) + .collect(); + + let r = session + .read(&reads, TimestampsToReturn::Both, 0.0) + .await + .unwrap(); + + assert_eq!(r.len(), values.len()); + for (read, write) in r.into_iter().zip(values) { + assert_eq!(read.value, 
write.value.value); + } +} + +#[tokio::test] +async fn write_variable() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::ARRAY_DIMENSIONS + | WriteMask::VALUE_RANK + | WriteMask::DATA_TYPE + | WriteMask::ACCESS_LEVEL + | WriteMask::USER_ACCESS_LEVEL + | WriteMask::HISTORIZING, + ) + .data_type(DataTypeId::String) + .value("value") + .access_level(AccessLevel::CURRENT_READ | AccessLevel::CURRENT_WRITE) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value(AttributeId::DisplayName, LocalizedText::from("NewVar"), &id), + write_value(AttributeId::BrowseName, QualifiedName::from("NewVar"), &id), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value(AttributeId::ArrayDimensions, vec![2u32], &id), + write_value(AttributeId::ValueRank, 1, &id), + write_value( + AttributeId::DataType, + Variant::NodeId(Box::new(DataTypeId::Int32.into())), + &id, + ), + write_value( + AttributeId::AccessLevel, + (AccessLevel::CURRENT_READ + | AccessLevel::CURRENT_WRITE + | AccessLevel::HISTORY_READ) + .bits(), + &id, + ), + write_value( + AttributeId::UserAccessLevel, + (UserAccessLevel::CURRENT_READ + | UserAccessLevel::CURRENT_WRITE + | UserAccessLevel::HISTORY_READ) + .bits(), + &id, + ), + write_value(AttributeId::Historizing, true, &id), + write_value(AttributeId::Value, vec![1, 2], &id), + ], + ) + .await; +} + +#[tokio::test] +async fn write_object() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + 
tester.handle.type_tree(), + ObjectBuilder::new(&id, "TestObj1", "TestObj1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::EVENT_NOTIFIER, + ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&ObjectTypeId::FolderType.into()), + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value(AttributeId::DisplayName, LocalizedText::from("NewObj"), &id), + write_value(AttributeId::BrowseName, QualifiedName::from("NewObj"), &id), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value( + AttributeId::EventNotifier, + EventNotifier::SUBSCRIBE_TO_EVENTS.bits(), + &id, + ), + ], + ) + .await; +} + +#[tokio::test] +async fn write_view() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ViewBuilder::new(&id, "TestView1", "TestView1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::EVENT_NOTIFIER + | WriteMask::CONTAINS_NO_LOOPS, + ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value( + AttributeId::DisplayName, + LocalizedText::from("NewView"), + &id, + ), + write_value(AttributeId::BrowseName, QualifiedName::from("NewView"), &id), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value( + AttributeId::EventNotifier, + EventNotifier::SUBSCRIBE_TO_EVENTS.bits(), + &id, + ), + write_value(AttributeId::ContainsNoLoops, true, &id), + ], + ) + .await; +} + +#[tokio::test] +async fn write_method() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + 
nm.address_space(), + tester.handle.type_tree(), + MethodBuilder::new(&id, "TestMethod1", "TestMethod1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::EXECUTABLE + | WriteMask::USER_EXECUTABLE, + ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value( + AttributeId::DisplayName, + LocalizedText::from("NewMethod"), + &id, + ), + write_value( + AttributeId::BrowseName, + QualifiedName::from("NewMethod"), + &id, + ), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value(AttributeId::Executable, true, &id), + write_value(AttributeId::UserExecutable, true, &id), + ], + ) + .await; +} + +#[tokio::test] +async fn write_object_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ObjectTypeBuilder::new(&id, "TestObjectType1", "TestObjectType1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::IS_ABSTRACT, + ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value( + AttributeId::DisplayName, + LocalizedText::from("NewObjectType"), + &id, + ), + write_value( + AttributeId::BrowseName, + QualifiedName::from("NewObjectType"), + &id, + ), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value(AttributeId::IsAbstract, true, &id), + ], + ) + .await; +} + +#[tokio::test] +async fn write_variable_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + 
tester.handle.type_tree(), + VariableTypeBuilder::new(&id, "TestVariableType1", "TestVariableType1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::IS_ABSTRACT + | WriteMask::DATA_TYPE + | WriteMask::ARRAY_DIMENSIONS + | WriteMask::VALUE_FOR_VARIABLE_TYPE + | WriteMask::VALUE_RANK, + ) + .data_type(DataTypeId::String) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value( + AttributeId::DisplayName, + LocalizedText::from("NewVariableType"), + &id, + ), + write_value( + AttributeId::BrowseName, + QualifiedName::from("NewVariableType"), + &id, + ), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value(AttributeId::IsAbstract, true, &id), + write_value(AttributeId::ArrayDimensions, vec![2u32], &id), + write_value(AttributeId::ValueRank, 1, &id), + write_value( + AttributeId::DataType, + Variant::NodeId(Box::new(DataTypeId::Int32.into())), + &id, + ), + write_value(AttributeId::Value, vec![1, 2], &id), + ], + ) + .await; +} + +#[tokio::test] +async fn write_data_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + DataTypeBuilder::new(&id, "TestObjectType1", "TestObjectType1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::IS_ABSTRACT, + ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value( + AttributeId::DisplayName, + LocalizedText::from("NewDataType"), + &id, + ), + write_value( + AttributeId::BrowseName, + QualifiedName::from("NewDataType"), + &id, + ), + write_value( + 
AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value(AttributeId::IsAbstract, true, &id), + ], + ) + .await; +} + +#[tokio::test] +async fn write_reference_type() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + ReferenceTypeBuilder::new(&id, "TestRefType1", "TestRefType1") + .description("Description") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::IS_ABSTRACT + | WriteMask::SYMMETRIC + | WriteMask::INVERSE_NAME, + ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + None, + Vec::new(), + ); + + write_then_read( + &session, + &[ + write_value( + AttributeId::DisplayName, + LocalizedText::from("NewRefType"), + &id, + ), + write_value( + AttributeId::BrowseName, + QualifiedName::from("NewRefType"), + &id, + ), + write_value( + AttributeId::Description, + LocalizedText::from("Description"), + &id, + ), + write_value(AttributeId::IsAbstract, true, &id), + write_value(AttributeId::Symmetric, true, &id), + write_value( + AttributeId::InverseName, + LocalizedText::from("Inverse"), + &id, + ), + ], + ) + .await; +} + +#[tokio::test] +async fn write_invalid() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .write_mask( + WriteMask::DISPLAY_NAME + | WriteMask::BROWSE_NAME + | WriteMask::DESCRIPTION + | WriteMask::DATA_TYPE + | WriteMask::HISTORIZING, + ) + .data_type(DataTypeId::String) + .value("value") + .access_level(AccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let r = session + .write(&[ + // Wrong 
type + write_value(AttributeId::DataType, LocalizedText::from("uhoh"), &id), + // Not valid for variables. + write_value(AttributeId::EventNotifier, 1, &id), + // Not allowed + write_value( + AttributeId::AccessLevel, + (AccessLevel::CURRENT_READ | AccessLevel::CURRENT_WRITE).bits(), + &id, + ), + // Not allowed value + write_value(AttributeId::Value, "foo", &id), + ]) + .await + .unwrap(); + + assert_eq!(r[0], StatusCode::BadTypeMismatch); + assert_eq!(r[1], StatusCode::BadNotWritable); + assert_eq!(r[2], StatusCode::BadNotWritable); + assert_eq!(r[3], StatusCode::BadUserAccessDenied); +} + +#[tokio::test] +async fn write_limits() { + let (tester, _nm, session) = setup().await; + + let write_limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_write; + + // Write zero. This doesn't actually reach the server, since we intercept it in the client. + // we still protect against it on the server, but we don't have a way to bypass that check here. + let r = session.write(&[]).await.unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + // Too many operations + let ops: Vec<_> = (0..(write_limit + 1)) + .map(|r| write_value(AttributeId::Value, 123, NodeId::new(2, r as i32))) + .collect(); + + let r = session.write(&ops).await.unwrap_err(); + assert_eq!(r, StatusCode::BadTooManyOperations); + + // Exact number of operations + let ops: Vec<_> = (0..write_limit) + .map(|r| write_value(AttributeId::Value, 123, NodeId::new(2, r as i32))) + .collect(); + + session.write(&ops).await.unwrap(); +} + +#[tokio::test] +async fn write_bytestring_to_byte_array() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .value(vec![0u8; 16]) + .data_type(DataTypeId::Byte) + .value_rank(1) + .access_level(AccessLevel::CURRENT_WRITE) + .user_access_level(UserAccessLevel::CURRENT_WRITE) + 
.build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let bytes = ByteString::from(vec![0x1u8, 0x2u8, 0x3u8, 0x4u8]); + let mut write = write_value(AttributeId::Value, bytes, &id); + write.index_range = "0:4".into(); + let r = session.write(&[write]).await.unwrap(); + assert_eq!(StatusCode::Good, r[0]); + + { + let sp = nm.address_space().read(); + let node = sp.find(&id).unwrap(); + let NodeType::Variable(v) = node else { + panic!(""); + }; + let val = v.value( + TimestampsToReturn::Both, + opcua::types::NumericRange::None, + &Default::default(), + 0.0, + ); + + println!("{val:?}"); + + let arr = array_value(&val); + assert_eq!(16, arr.len()); + assert_eq!( + &arr[0..5], + &[ + Variant::Byte(1), + Variant::Byte(2), + Variant::Byte(3), + Variant::Byte(4), + Variant::Byte(0) + ] + ); + } +} + +#[tokio::test] +async fn write_index_range() { + let (tester, nm, session) = setup().await; + + let id1 = nm.inner().next_node_id(); + let id2 = nm.inner().next_node_id(); + for id in [&id1, &id2] { + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(id, "TestVar", "TestVar") + .value(vec![0u8; 16]) + .data_type(DataTypeId::Byte) + .value_rank(1) + .access_level(AccessLevel::CURRENT_WRITE) + .user_access_level(UserAccessLevel::CURRENT_WRITE) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + } + + let nodes_to_write = [ + WriteValue { + node_id: id1.clone(), + attribute_id: AttributeId::Value as u32, + index_range: "12".into(), + value: DataValue::new_now(vec![73u8]), + }, + WriteValue { + node_id: id2.clone(), + attribute_id: AttributeId::Value as u32, + index_range: "4:12".into(), + value: DataValue::new_now(vec![1u8, 2u8, 3u8, 4u8, 5u8, 6u8, 7u8, 8u8, 9u8]), + }, + ]; + + let r = 
session.write(&nodes_to_write).await.unwrap(); + assert_eq!(r[0], StatusCode::Good); + assert_eq!(r[1], StatusCode::Good); + + let sp = nm.address_space().read(); + // Node 1 + let node = sp.find(&id1).unwrap(); + let NodeType::Variable(v) = node else { + panic!(""); + }; + let val = v.value( + TimestampsToReturn::Both, + opcua::types::NumericRange::None, + &Default::default(), + 0.0, + ); + let mut bytes: Vec<_> = vec![0u8; 16]; + bytes[12] = 73; + assert_eq!(val.value.unwrap(), bytes.into()); + // Node 2 + let node = sp.find(&id2).unwrap(); + let NodeType::Variable(v) = node else { + panic!(""); + }; + let val = v.value( + TimestampsToReturn::Both, + opcua::types::NumericRange::None, + &Default::default(), + 0.0, + ); + let mut bytes: Vec<_> = vec![0u8; 16]; + for i in 4..13 { + bytes[i] = (i - 3) as u8; + } + assert_eq!(val.value.unwrap(), bytes.into()); +} + +#[tokio::test] +async fn history_update_insert() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .value(0) + .description("Description") + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::HISTORY_WRITE | AccessLevel::HISTORY_READ) + .user_access_level(UserAccessLevel::HISTORY_WRITE | UserAccessLevel::HISTORY_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + let start = DateTime::now() - TimeDelta::try_seconds(1000).unwrap(); + + let action = HistoryUpdateAction::UpdateDataDetails(UpdateDataDetails { + node_id: id.clone(), + perform_insert_replace: opcua::types::PerformUpdateType::Insert, + update_values: Some( + (0..1000) + .map(|v| DataValue { + value: Some((v as i32).into()), + status: Some(StatusCode::Good), + source_timestamp: Some(start + TimeDelta::try_seconds(v).unwrap()), + 
..Default::default() + }) + .collect(), + ), + }); + + let results = session.history_update(&[action]).await.unwrap(); + assert_eq!(1, results.len()); + assert_eq!(StatusCode::Good, results[0].status_code); + let res = results[0].operation_results.as_ref().unwrap(); + for s in res { + assert_eq!(s, &StatusCode::GoodEntryInserted); + } + + let r = session + .history_read( + &HistoryReadAction::ReadRawModifiedDetails(ReadRawModifiedDetails { + is_read_modified: false, + start_time: start, + end_time: start + TimeDelta::try_seconds(2000).unwrap(), + num_values_per_node: 1000, + return_bounds: false, + }), + TimestampsToReturn::Both, + false, + &[HistoryReadValueId { + node_id: id.clone(), + index_range: Default::default(), + data_encoding: Default::default(), + continuation_point: Default::default(), + }], + ) + .await + .unwrap(); + + let v = &r[0]; + assert!(v.continuation_point.is_null()); + assert_eq!(v.status_code, StatusCode::Good); + let data = v + .history_data + .decode_inner::(session.decoding_options()) + .unwrap() + .data_values + .unwrap(); + + assert_eq!(data.len(), 1000); + for (idx, it) in data.into_iter().enumerate() { + let v = match it.value.as_ref().unwrap() { + Variant::Int32(v) => *v, + _ => panic!("Wrong value type: {:?}", it.value), + }; + assert_eq!(idx as i32, v); + assert_eq!( + it.source_timestamp, + Some(start + TimeDelta::try_seconds(idx as i64).unwrap()) + ); + } +} + +#[tokio::test] +async fn history_update_fail() { + let (tester, nm, session) = setup().await; + + let id = nm.inner().next_node_id(); + nm.inner().add_node( + nm.address_space(), + tester.handle.type_tree(), + VariableBuilder::new(&id, "TestVar1", "TestVar1") + .historizing(true) + .value(0) + .description("Description") + .data_type(DataTypeId::Int32) + .access_level(AccessLevel::CURRENT_READ) + .user_access_level(UserAccessLevel::CURRENT_READ) + .build() + .into(), + &ObjectId::ObjectsFolder.into(), + &ReferenceTypeId::Organizes.into(), + 
Some(&VariableTypeId::BaseDataVariableType.into()), + Vec::new(), + ); + + // Write nothing + let r = session.history_update(&[]).await.unwrap_err(); + assert_eq!(r, StatusCode::BadNothingToDo); + + let history_update_limit = tester + .handle + .info() + .config + .limits + .operational + .max_nodes_per_history_update; + + // Write too many + let r = session + .history_update( + &(0..(history_update_limit + 1)) + .map(|i| { + HistoryUpdateAction::UpdateDataDetails(UpdateDataDetails { + node_id: NodeId::new(2, i as i32), + perform_insert_replace: opcua::types::PerformUpdateType::Insert, + update_values: None, + }) + }) + .collect::>(), + ) + .await + .unwrap_err(); + + assert_eq!(r, StatusCode::BadTooManyOperations); + + // Write without access + let r = session + .history_update(&[HistoryUpdateAction::UpdateDataDetails(UpdateDataDetails { + node_id: id.clone(), + perform_insert_replace: opcua::types::PerformUpdateType::Insert, + update_values: None, + })]) + .await + .unwrap(); + + assert_eq!(r[0].status_code, StatusCode::BadUserAccessDenied); + + // Write node that doesn't exist + let r = session + .history_update(&[HistoryUpdateAction::UpdateDataDetails(UpdateDataDetails { + node_id: NodeId::new(2, 100), + perform_insert_replace: opcua::types::PerformUpdateType::Insert, + update_values: None, + })]) + .await + .unwrap(); + + assert_eq!(r[0].status_code, StatusCode::BadNodeIdUnknown); +} diff --git a/integration/x509/user_cert.der b/lib/tests/x509/user_cert.der similarity index 100% rename from integration/x509/user_cert.der rename to lib/tests/x509/user_cert.der diff --git a/integration/x509/user_private_key.pem b/lib/tests/x509/user_private_key.pem similarity index 100% rename from integration/x509/user_private_key.pem rename to lib/tests/x509/user_private_key.pem diff --git a/samples/async-server/Cargo.toml b/samples/async-server/Cargo.toml new file mode 100644 index 000000000..d6699e488 --- /dev/null +++ b/samples/async-server/Cargo.toml @@ -0,0 +1,17 @@ 
+[package] +name = "async-server" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tokio = { version = "1.37.0", features = ["full"] } +chrono = "0.4" +log = "0.4" +ctrlc = "3.4.4" + +[dependencies.opcua] +path = "../../lib" +version = "0.13.0" # OPCUARustVersion +features = ["server", "console-logging"] diff --git a/samples/async-server/src/main.rs b/samples/async-server/src/main.rs new file mode 100644 index 000000000..36e10033a --- /dev/null +++ b/samples/async-server/src/main.rs @@ -0,0 +1,59 @@ +use std::{path::PathBuf, time::Duration}; + +use log::info; +use opcua::{ + client::{Client, ClientConfig}, + core::config::Config, + server::{ServerBuilder, ServerHandle}, +}; + +#[tokio::main] +async fn main() { + opcua::console_logging::init(); + + let (server, handle) = ServerBuilder::new() + .with_config_from("../server.conf") + .build() + .unwrap(); + + let token = handle.token().clone(); + ctrlc::set_handler(move || token.cancel()).unwrap(); + + let join_handle = tokio::task::spawn(server.run()); + + let mut client = Client::new(ClientConfig::load(&PathBuf::from("../client.conf")).unwrap()); + + let (session, event_loop) = client + .new_session_from_endpoint( + "opc.tcp://127.0.0.1:4855", + opcua::client::IdentityToken::Anonymous, + ) + .await + .unwrap(); + + let client_handle = tokio::task::spawn(event_loop.run()); + + session.wait_for_connection().await; + + info!("Connected to server!"); + + session.disconnect().await.unwrap(); + client_handle.await.unwrap(); + + info!("Closed session"); + tokio::select! 
{ + r = join_handle => { r.unwrap() }, + _ = gen_values(&handle) => { unreachable!() } + } + .unwrap(); +} + +async fn gen_values(handle: &ServerHandle) { + let mut counter = 0u8; + loop { + tokio::time::sleep(Duration::from_millis(500)).await; + + handle.set_service_level(counter); + counter = counter.wrapping_add(1); + } +} diff --git a/samples/chess-server/Cargo.toml b/samples/chess-server/Cargo.toml index c3fee1b6b..17660b149 100644 --- a/samples/chess-server/Cargo.toml +++ b/samples/chess-server/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Adam Lock "] edition = "2021" [dependencies] +tokio = { version = "1.38.0", features = ["full"] } uci = "0.1.1" [dependencies.opcua] diff --git a/samples/chess-server/src/main.rs b/samples/chess-server/src/main.rs index 48b379465..10d839960 100644 --- a/samples/chess-server/src/main.rs +++ b/samples/chess-server/src/main.rs @@ -3,13 +3,16 @@ // Copyright (C) 2017-2024 Adam Lock use std::env; -use std::path::PathBuf; use std::sync::Arc; -use std::thread; use std::time::Duration; -use opcua::server::prelude::*; +use opcua::server::address_space::VariableBuilder; +use opcua::server::node_manager::memory::{ + simple_node_manager, NamespaceMetadata, SimpleNodeManager, +}; +use opcua::server::{ServerBuilder, SubscriptionCache}; use opcua::sync::Mutex; +use opcua::types::*; mod game; @@ -32,7 +35,8 @@ fn default_engine_path() -> String { }) } -fn main() { +#[tokio::main] +async fn main() { let engine_path = if env::args().len() > 1 { env::args().nth(1).unwrap() } else { @@ -42,20 +46,33 @@ fn main() { let game = Arc::new(Mutex::new(game::Game::new(&engine_path))); // Create an OPC UA server with sample configuration and default node set - let server = Server::new(ServerConfig::load(&PathBuf::from("../server.conf")).unwrap()); - - let address_space = server.address_space(); - - let ns = { - let mut address_space = address_space.write(); - - let ns = address_space - .register_namespace("urn:chess-server") - .unwrap(); - - let board_node_id 
= address_space - .add_folder("Board", "Board", &NodeId::objects_folder_id()) - .unwrap(); + let (server, handle) = ServerBuilder::new() + .with_config_from("../server.conf") + .with_node_manager(simple_node_manager( + NamespaceMetadata { + namespace_uri: "urn:chess-server".to_owned(), + ..Default::default() + }, + "chess", + )) + .build() + .unwrap(); + let node_manager = handle + .node_managers() + .get_of_type::() + .unwrap(); + let ns = handle.get_namespace_index("urn:chess-server").unwrap(); + + { + let mut address_space = node_manager.address_space().write(); + + let board_node_id = NodeId::new(2, "board"); + address_space.add_folder( + &board_node_id, + "Board", + "Board", + &NodeId::objects_folder_id(), + ); BOARD_SQUARES.iter().for_each(|square| { // Variable represents each square's state @@ -76,19 +93,19 @@ fn main() { .value(false) .insert(&mut address_space); }); + }; + { let game = game.lock(); - update_board_state(&game, &mut address_space, ns); - - ns - }; + update_board_state(&game, &node_manager, 2, &handle.subscriptions()); + } // Spawn a thread for the game which will update server state // Each variable will hold a value representing what's in the square. A client can subscribe to the content // of the variables and observe games being played. - thread::spawn(move || { + tokio::task::spawn(async move { let sleep_time = Duration::from_millis(1500); let mut game = game.lock(); loop { @@ -113,28 +130,36 @@ fn main() { game.make_move(bestmove); game.print_board(); - { - let mut address_space = address_space.write(); - update_board_state(&game, &mut address_space, ns); - } + update_board_state(&game, &node_manager, ns, &handle.subscriptions()); } - thread::sleep(sleep_time); + tokio::time::sleep(sleep_time).await; } }); - // Run the server. This does not ordinarily exit so you must Ctrl+C to terminate - server.run(); + // Run the server. 
+ server.run().await.unwrap(); } -fn update_board_state(game: &game::Game, address_space: &mut AddressSpace, ns: u16) { +fn update_board_state( + game: &game::Game, + nm: &SimpleNodeManager, + ns: u16, + subscriptions: &SubscriptionCache, +) { let now = DateTime::now(); BOARD_SQUARES.iter().for_each(|square| { // Piece on the square let square_value = game.square_from_str(square); let node_id = NodeId::new(ns, *square); - let _ = address_space.set_variable_value(node_id, square_value as u8, &now, &now); + nm.set_value( + subscriptions, + &node_id, + None, + DataValue::new_at(square_value as u8, now), + ) + .unwrap(); // Highlight the square let node_id = NodeId::new(ns, format!("{}.highlight", square)); @@ -143,6 +168,12 @@ fn update_board_state(game: &game::Game, address_space: &mut AddressSpace, ns: u } else { false }; - let _ = address_space.set_variable_value(node_id, highlight_square, &now, &now); + nm.set_value( + subscriptions, + &node_id, + None, + DataValue::new_at(highlight_square, now), + ) + .unwrap(); }); } diff --git a/samples/client.conf b/samples/client.conf index 8fb8f9b07..85acd4030 100644 --- a/samples/client.conf +++ b/samples/client.conf @@ -62,8 +62,8 @@ publish_timeout: secs: 60 nanos: 0 min_publish_interval: - secs: 1 - nanos: 0 + secs: 0 + nanos: 100000000 max_inflight_publish: 2 session_timeout: 0 performance: diff --git a/samples/demo-server/Cargo.toml b/samples/demo-server/Cargo.toml index 2e48132ce..dba99d7d2 100644 --- a/samples/demo-server/Cargo.toml +++ b/samples/demo-server/Cargo.toml @@ -15,8 +15,9 @@ log4rs = "1.2" lazy_static = "1.4.0" pico-args = "0.5" tokio = { version = "1", features = ["full"] } +tokio-util = "0.7.11" [dependencies.opcua] path = "../../lib" version = "0.13.0" # OPCUARustVersion -features = ["all"] \ No newline at end of file +features = ["all"] diff --git a/samples/demo-server/src/control.rs b/samples/demo-server/src/control.rs index 542898a51..7a258fec6 100644 --- a/samples/demo-server/src/control.rs +++ 
b/samples/demo-server/src/control.rs @@ -2,46 +2,57 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2024 Adam Lock -use opcua::server::prelude::*; +use std::sync::Arc; -pub fn add_control_switches(server: &mut Server, ns: u16) { +use opcua::{ + server::{ + address_space::VariableBuilder, node_manager::memory::SimpleNodeManager, SubscriptionCache, + }, + types::{DataTypeId, NodeId, StatusCode, Variant}, +}; +use tokio_util::sync::CancellationToken; + +pub fn add_control_switches( + ns: u16, + manager: Arc, + subscriptions: Arc, + token: CancellationToken, +) { // The address space is guarded so obtain a lock to change it let abort_node_id = NodeId::new(ns, "abort"); - - let address_space = server.address_space(); - let server_state = server.server_state(); + let control_folder_id = NodeId::new(ns, "control"); { - let mut address_space = address_space.write(); - let folder_id = address_space - .add_folder("Control", "Control", &NodeId::objects_folder_id()) - .unwrap(); + let mut address_space = manager.address_space().write(); + address_space.add_folder( + &control_folder_id, + "Control", + "Control", + &NodeId::objects_folder_id(), + ); VariableBuilder::new(&abort_node_id, "Abort", "Abort") .data_type(DataTypeId::Boolean) .value(false) .writable() - .organized_by(&folder_id) + .organized_by(&control_folder_id) .insert(&mut address_space); } - server.add_polling_action(1000, move || { - let address_space = address_space.read(); - // Test for abort flag - let abort = if let Ok(v) = address_space.get_variable_value(abort_node_id.clone()) { - match v.value { - Some(Variant::Boolean(v)) => v, - _ => { - panic!("Abort value should be true or false"); + let mgr_ref = manager.clone(); + manager + .inner() + .add_write_callback(abort_node_id.clone(), move |v, _| { + if let Some(Variant::Boolean(val)) = v.value { + if val { + token.cancel(); } + mgr_ref + .set_value(&subscriptions, &abort_node_id, None, v) + .unwrap(); + StatusCode::Good + } else { + 
StatusCode::BadTypeMismatch } - } else { - panic!("Abort value should be in address space"); - }; - // Check if abort has been set to true, in which case abort - if abort { - let mut server_state = server_state.write(); - server_state.abort(); - } - }); + }); } diff --git a/samples/demo-server/src/historical.rs b/samples/demo-server/src/historical.rs deleted file mode 100644 index 1886cd88b..000000000 --- a/samples/demo-server/src/historical.rs +++ /dev/null @@ -1,39 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Implementations of HistoricalDataProvider and HistoricalEventProvider -use std::sync::Arc; - -use opcua::server::prelude::*; -use opcua::sync::RwLock; - -// Register some historical data providers -pub fn add_providers(server: &mut Server) { - let server_state = server.server_state(); - let mut server_state = server_state.write(); - server_state.set_historical_data_provider(Box::new(DataProvider)); - server_state.set_historical_event_provider(Box::new(EventProvider)); -} - -pub struct DataProvider; - -pub struct EventProvider; - -impl HistoricalDataProvider for DataProvider { - fn read_raw_modified_details( - &self, - _address_space: Arc>, - _request: ReadRawModifiedDetails, - _timestamps_to_return: TimestampsToReturn, - _release_continuation_points: bool, - _nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - println!("Overridden read_raw_modified_details"); - Err(StatusCode::BadHistoryOperationUnsupported) - } -} - -impl HistoricalEventProvider for EventProvider { - // -} diff --git a/samples/demo-server/src/machine.rs b/samples/demo-server/src/machine.rs index deb908e9a..f7e42f4cf 100644 --- a/samples/demo-server/src/machine.rs +++ b/samples/demo-server/src/machine.rs @@ -2,16 +2,38 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2024 Adam Lock -use std::sync::{ - atomic::{AtomicU16, AtomicU32, Ordering}, - Arc, +use std::{ + sync::{ + atomic::{AtomicU16, 
AtomicU32, Ordering}, + Arc, + }, + time::Duration, }; -use chrono::TimeDelta; -use opcua::server::{events::event::*, prelude::*}; +use opcua::{ + crypto::random, + server::{ + address_space::{ + AddressSpace, EventNotifier, ObjectBuilder, ObjectTypeBuilder, VariableBuilder, + }, + node_manager::memory::SimpleNodeManager, + BaseEventType, Event, SubscriptionCache, + }, + types::{ + DataTypeId, DataValue, DateTime, NodeId, ObjectId, ObjectTypeId, UAString, VariableTypeId, + }, +}; +use rand; +use tokio_util::sync::CancellationToken; -pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { - let address_space = server.address_space(); +pub fn add_machinery( + ns: u16, + manager: Arc, + subscriptions: Arc, + raise_event: bool, + token: CancellationToken, +) { + let address_space = manager.address_space(); let machine1_counter = Arc::new(AtomicU16::new(0)); let machine2_counter = Arc::new(AtomicU16::new(50)); @@ -20,12 +42,16 @@ pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { add_machinery_model(&mut address_space, ns); // Create a folder under static folder - let devices_folder_id = address_space - .add_folder("Devices", "Devices", &NodeId::objects_folder_id()) - .unwrap(); + let devices_folder_id = NodeId::new(ns, "devices"); + address_space.add_folder( + &devices_folder_id, + "Devices", + "Devices", + &NodeId::objects_folder_id(), + ); // Create the machine events folder - let _ = address_space.add_folder_with_id( + let _ = address_space.add_folder( &machine_events_folder_id(ns), "Events", "Events", @@ -35,6 +61,7 @@ pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { // Create an object representing a machine that cycles from 0 to 100. 
Each time it cycles it will create an event let machine1_id = add_machine( &mut address_space, + &manager, ns, devices_folder_id.clone(), "Machine 1", @@ -42,6 +69,7 @@ pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { ); let machine2_id = add_machine( &mut address_space, + &manager, ns, devices_folder_id, "Machine 2", @@ -50,23 +78,28 @@ pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { (machine1_id, machine2_id) }; - // Increment counters - server.add_polling_action(300, move || { - let mut address_space = address_space.write(); - increment_counter( - &mut address_space, - ns, - machine1_counter.clone(), - &machine1_id, - raise_event, - ); - increment_counter( - &mut address_space, - ns, - machine2_counter.clone(), - &machine2_id, - raise_event, - ); + tokio::task::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_millis(300)); + while !token.is_cancelled() { + interval.tick().await; + + increment_counter( + &manager, + &subscriptions, + ns, + machine1_counter.clone(), + &machine1_id, + raise_event, + ); + increment_counter( + &manager, + &subscriptions, + ns, + machine2_counter.clone(), + &machine2_id, + raise_event, + ); + } }); } @@ -110,6 +143,7 @@ fn add_machinery_model(address_space: &mut AddressSpace, ns: u16) { fn add_machine( address_space: &mut AddressSpace, + manager: &SimpleNodeManager, ns: u16, folder_id: NodeId, name: &str, @@ -128,30 +162,41 @@ fn add_machine( .property_of(machine_id.clone()) .data_type(DataTypeId::UInt16) .has_type_definition(VariableTypeId::PropertyType) - .value_getter(AttrFnGetter::new_boxed( - move |_, _, _, _, _, _| -> Result, StatusCode> { - let value = counter.load(Ordering::Relaxed); - Ok(Some(DataValue::new_now(value))) - }, - )) .insert(address_space); + manager + .inner() + .add_read_callback(counter_id, move |_, _, _| { + let value = counter.load(Ordering::Relaxed); + Ok(DataValue::new_now(value)) + }); + machine_id } pub struct 
MachineCycledEventType { base: BaseEventType, + ns: u16, } impl Event for MachineCycledEventType { - type Err = (); + fn get_field( + &self, + type_definition_id: &NodeId, + browse_path: &[opcua::types::QualifiedName], + attribute_id: opcua::types::AttributeId, + index_range: opcua::types::NumericRange, + ) -> opcua::types::Variant { + self.base + .get_field(type_definition_id, browse_path, attribute_id, index_range) + } - fn is_valid(&self) -> bool { - self.base.is_valid() + fn time(&self) -> &opcua::types::DateTime { + self.base.time() } - fn raise(&mut self, address_space: &mut AddressSpace) -> Result { - self.base.raise(address_space) + fn matches_type_id(&self, id: &NodeId) -> bool { + self.base.matches_type_id(id) || id != &Self::event_type_id(self.ns) } } @@ -166,88 +211,52 @@ lazy_static! { } impl MachineCycledEventType { - fn new( - machine_name: &str, - ns: u16, - node_id: R, - browse_name: S, - display_name: T, - parent_node: U, - source_node: V, - time: DateTime, - ) -> Self - where - R: Into, - S: Into, - T: Into, - U: Into, - V: Into, - { + fn new(machine_name: &str, ns: u16, source_node: impl Into, time: DateTime) -> Self { let event_type_id = MachineCycledEventType::event_type_id(ns); let source_node: NodeId = source_node.into(); MachineCycledEventType { base: BaseEventType::new( - node_id, event_type_id, - browse_name, - display_name, - parent_node, + random::byte_string(128), + format!("A machine cycled event from machine {}", source_node), time, ) - .source_node(source_node.clone()) - .source_name(UAString::from(machine_name)) - .message(LocalizedText::from(format!( - "A machine cycled event from machine {}", - source_node - ))) - .severity(rand::random::() % 999u16 + 1u16), + .set_source_node(source_node.clone()) + .set_source_name(UAString::from(machine_name)) + .set_severity(rand::random::() % 999u16 + 1u16), + ns, } } } fn raise_machine_cycled_event( - address_space: &mut AddressSpace, + manager: &SimpleNodeManager, + subscriptions: 
&SubscriptionCache, ns: u16, source_machine_id: &NodeId, ) { - // Remove old events - let now = chrono::Utc::now(); - let happened_before = now - TimeDelta::try_minutes(5).unwrap(); - purge_events( - source_machine_id, - MachineCycledEventType::event_type_id(ns), - address_space, - &happened_before, - ); - - let machine_name = if let Some(node) = address_space.find_node(source_machine_id) { - format!("{}", node.as_node().display_name().text) - } else { - "Machine ???".to_string() + let machine_name = { + let address_space = manager.address_space(); + let address_space_lck = address_space.read(); + if let Some(node) = address_space_lck.find_node(source_machine_id) { + format!("{}", node.as_node().display_name().text) + } else { + "Machine ???".to_string() + } }; // New event - let event_node_id = NodeId::next_numeric(ns); - let event_id = MACHINE_CYCLED_EVENT_ID.fetch_add(1, Ordering::Relaxed); - let event_name = format!("Event{}", event_id); let now = DateTime::now(); - let mut event = MachineCycledEventType::new( - &machine_name, - ns, - &event_node_id, - event_name.clone(), - event_name, - machine_events_folder_id(ns), - source_machine_id, - now, - ); + let event = MachineCycledEventType::new(&machine_name, ns, source_machine_id, now); // create an event object in a folder with the - let _ = event.raise(address_space); + + subscriptions.notify_events([(&event as &dyn Event, &ObjectId::Server.into())].into_iter()); } fn increment_counter( - address_space: &mut AddressSpace, + manager: &SimpleNodeManager, + subscriptions: &SubscriptionCache, ns: u16, machine_counter: Arc, machine_id: &NodeId, @@ -259,7 +268,7 @@ fn increment_counter( } else { if raise_event { // Raise new event - raise_machine_cycled_event(address_space, ns, machine_id); + raise_machine_cycled_event(manager, subscriptions, ns, machine_id); } 0 }; diff --git a/samples/demo-server/src/main.rs b/samples/demo-server/src/main.rs index d4ff0f395..807eea3e3 100644 --- a/samples/demo-server/src/main.rs +++ 
b/samples/demo-server/src/main.rs @@ -20,13 +20,16 @@ extern crate lazy_static; #[macro_use] extern crate log; -use std::{path::PathBuf, sync::Arc}; +use std::path::PathBuf; -use opcua::server::{http, prelude::*}; -use opcua::sync::RwLock; +use tokio; + +use opcua::server::{ + node_manager::memory::{simple_node_manager, NamespaceMetadata, SimpleNodeManager}, + ServerBuilder, +}; mod control; -mod historical; mod machine; mod methods; mod scalar; @@ -105,7 +108,8 @@ Usage: } } -fn main() { +#[tokio::main] +async fn main() { let args = Args::parse_args().unwrap(); if args.help { Args::usage(); @@ -114,61 +118,52 @@ fn main() { log4rs::init_file("log4rs.yaml", Default::default()).unwrap(); // Create an OPC UA server with sample configuration and default node set - let mut server = Server::new(ServerConfig::load(&args.config_path).unwrap()); + let (server, handle) = ServerBuilder::new() + .with_config_from(&args.config_path) + .with_node_manager(simple_node_manager( + NamespaceMetadata { + namespace_uri: "urn:DemoServer".to_owned(), + ..Default::default() + }, + "demo", + )) + .build() + .unwrap(); - let ns = { - let address_space = server.address_space(); - let mut address_space = address_space.write(); - address_space.register_namespace("urn:demo-server").unwrap() - }; + let node_manager = handle + .node_managers() + .get_of_type::() + .unwrap(); + let ns = handle.get_namespace_index("urn:DemoServer").unwrap(); + + let token = handle.token(); // Add some objects representing machinery - machine::add_machinery(&mut server, ns, args.raise_events); + machine::add_machinery( + ns, + node_manager.clone(), + handle.subscriptions().clone(), + args.raise_events, + token.clone(), + ); // Add some scalar variables - scalar::add_scalar_variables(&mut server, ns); + scalar::add_scalar_variables(node_manager.clone(), handle.subscriptions().clone(), ns); // Add some rapidly changing values - scalar::add_stress_variables(&mut server, ns); + 
scalar::add_stress_variables(node_manager.clone(), handle.subscriptions().clone(), ns); // Add some control switches, e.g. abort flag - control::add_control_switches(&mut server, ns); + control::add_control_switches( + ns, + node_manager.clone(), + handle.subscriptions().clone(), + token.clone(), + ); // Add some methods - methods::add_methods(&mut server, ns); + methods::add_methods(node_manager, ns); - // Add historical data providers - historical::add_providers(&mut server); - - // OPCUA and Actix are sharing tokio runtime, so create it first - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(); - - // Start the http server, used for metrics - start_http_server(&runtime, &server, args.content_path.to_str().unwrap()); - - // Run the server. This does not ordinarily exit so you must Ctrl+C to terminate - Server::run_server_on_runtime( - runtime, - Server::new_server_task(Arc::new(RwLock::new(server))), - true, - ); + server.run().await.unwrap(); } } - -fn start_http_server(runtime: &tokio::runtime::Runtime, server: &Server, content_path: &str) { - let server_state = server.server_state(); - let connections = server.connections(); - let metrics = server.server_metrics(); - // The index.html is in a path relative to the working dir. 
- let _ = http::run_http_server( - runtime, - "127.0.0.1:8585", - content_path, - server_state, - connections, - metrics, - ); -} diff --git a/samples/demo-server/src/methods.rs b/samples/demo-server/src/methods.rs index 3efbacce2..4f1c935d6 100644 --- a/samples/demo-server/src/methods.rs +++ b/samples/demo-server/src/methods.rs @@ -6,13 +6,16 @@ use std::sync::Arc; -use opcua::server::{ - address_space::method::MethodBuilder, callbacks, prelude::*, session::SessionManager, +use opcua::{ + server::{ + address_space::{EventNotifier, MethodBuilder, ObjectBuilder}, + node_manager::memory::SimpleNodeManager, + }, + types::{DataTypeId, NodeId, ObjectId, StatusCode, Variant}, }; -use opcua::sync::RwLock; -pub fn add_methods(server: &mut Server, ns: u16) { - let address_space = server.address_space(); +pub fn add_methods(manager: Arc, ns: u16) { + let address_space = manager.address_space(); let mut address_space = address_space.write(); let object_id = NodeId::new(ns, "Functions"); @@ -25,161 +28,77 @@ pub fn add_methods(server: &mut Server, ns: u16) { let fn_node_id = NodeId::new(ns, "NoOp"); MethodBuilder::new(&fn_node_id, "NoOp", "NoOp") .component_of(object_id.clone()) - .callback(Box::new(NoOp)) + .executable(true) + .user_executable(true) .insert(&mut address_space); + manager.inner().add_method_callback(fn_node_id, |_| { + debug!("NoOp method called"); + Ok(Vec::new()) + }); // HelloWorld has 0 inputs and 1 output - returns "Hello World" in a result parameter let fn_node_id = NodeId::new(ns, "HelloWorld"); MethodBuilder::new(&fn_node_id, "HelloWorld", "HelloWorld") .component_of(object_id.clone()) - .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) - .callback(Box::new(HelloWorld)) + .executable(true) + .user_executable(true) + .output_args( + &mut address_space, + &NodeId::new(ns, "HelloWorldOutput"), + &[("Result", DataTypeId::String).into()], + ) .insert(&mut address_space); + manager.inner().add_method_callback(fn_node_id, |_| { + 
debug!("HelloWorld method called"); + Ok(vec![Variant::from("Hello World!".to_owned())]) + }); // HelloX has 1 one input and 1 output - "Hello Foo" in a result parameter let fn_node_id = NodeId::new(ns, "HelloX"); MethodBuilder::new(&fn_node_id, "HelloX", "HelloX") .component_of(object_id.clone()) + .executable(true) + .user_executable(true) .input_args( &mut address_space, + &NodeId::new(ns, "HelloXInput"), &[("YourName", DataTypeId::String).into()], ) - .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) - .callback(Box::new(HelloX)) + .output_args( + &mut address_space, + &NodeId::new(ns, "HelloXOutput"), + &[("Result", DataTypeId::String).into()], + ) .insert(&mut address_space); + manager.inner().add_method_callback(fn_node_id, |args| { + // We don't actually need to do much validation here, since it should all have happened elsewhere, + // but we don't want to panic if something goes wrong. + let Some(Variant::String(s)) = args.get(0) else { + return Err(StatusCode::BadTypeMismatch); + }; + + Ok(vec![Variant::String( + format!("Hello {}!", s.as_ref()).into(), + )]) + }); // Boop has 1 one input and 0 output let fn_node_id = NodeId::new(ns, "Boop"); MethodBuilder::new(&fn_node_id, "Boop", "Boop") .component_of(object_id.clone()) - .input_args(&mut address_space, &[("Ping", DataTypeId::String).into()]) - .callback(Box::new(Boop)) + .executable(true) + .user_executable(true) + .input_args( + &mut address_space, + &NodeId::new(ns, "BoopInput"), + &[("Ping", DataTypeId::String).into()], + ) .insert(&mut address_space); -} - -struct NoOp; - -impl callbacks::Method for NoOp { - fn call( - &mut self, - _session_id: &NodeId, - _session_map: Arc>, - _request: &CallMethodRequest, - ) -> Result { - debug!("NoOp method called"); - Ok(CallMethodResult { - status_code: StatusCode::Good, - input_argument_results: None, - input_argument_diagnostic_infos: None, - output_arguments: None, - }) - } -} -struct Boop; - -impl callbacks::Method for Boop { - fn 
call( - &mut self, - _session_id: &NodeId, - _session_map: Arc>, - request: &CallMethodRequest, - ) -> Result { - // Validate input to be a string - debug!("Boop method called"); - let in1_status = if let Some(ref input_arguments) = request.input_arguments { - if let Some(in1) = input_arguments.first() { - if let Variant::String(_) = in1 { - StatusCode::Good - } else { - StatusCode::BadInvalidArgument - } - } else if input_arguments.is_empty() { - return Err(StatusCode::BadArgumentsMissing); - } else { - // Shouldn't get here because there is 1 argument - return Err(StatusCode::BadTooManyArguments); - } - } else { - return Err(StatusCode::BadArgumentsMissing); - }; - - let status_code = if in1_status.is_good() { - StatusCode::Good - } else { - StatusCode::BadInvalidArgument + manager.inner().add_method_callback(fn_node_id, |args| { + let Some(Variant::String(_)) = args.get(0) else { + return Err(StatusCode::BadInvalidArgument); }; - - Ok(CallMethodResult { - status_code, - input_argument_results: Some(vec![in1_status]), - input_argument_diagnostic_infos: None, - output_arguments: None, - }) - } -} - -struct HelloWorld; - -impl callbacks::Method for HelloWorld { - fn call( - &mut self, - _session_id: &NodeId, - _session_map: Arc>, - _request: &CallMethodRequest, - ) -> Result { - debug!("HelloWorld method called"); - let message = format!("Hello World!"); - Ok(CallMethodResult { - status_code: StatusCode::Good, - input_argument_results: None, - input_argument_diagnostic_infos: None, - output_arguments: Some(vec![Variant::from(message)]), - }) - } -} - -struct HelloX; - -impl callbacks::Method for HelloX { - fn call( - &mut self, - _session_id: &NodeId, - _session_map: Arc>, - request: &CallMethodRequest, - ) -> Result { - debug!("HelloX method called"); - // Validate input to be a string - let mut out1 = Variant::Empty; - let in1_status = if let Some(ref input_arguments) = request.input_arguments { - if let Some(in1) = input_arguments.first() { - if let 
Variant::String(in1) = in1 { - out1 = Variant::from(format!("Hello {}!", &in1)); - StatusCode::Good - } else { - StatusCode::BadTypeMismatch - } - } else if input_arguments.is_empty() { - return Err(StatusCode::BadArgumentsMissing); - } else { - // Shouldn't get here because there is 1 argument - return Err(StatusCode::BadTooManyArguments); - } - } else { - return Err(StatusCode::BadArgumentsMissing); - }; - - let status_code = if in1_status.is_good() { - StatusCode::Good - } else { - StatusCode::BadInvalidArgument - }; - - Ok(CallMethodResult { - status_code, - input_argument_results: Some(vec![in1_status]), - input_argument_diagnostic_infos: None, - output_arguments: Some(vec![out1]), - }) - } + Ok(Vec::new()) + }); } diff --git a/samples/demo-server/src/scalar.rs b/samples/demo-server/src/scalar.rs index 5e5989100..910cbc512 100644 --- a/samples/demo-server/src/scalar.rs +++ b/samples/demo-server/src/scalar.rs @@ -2,33 +2,48 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2024 Adam Lock +use std::sync::Arc; + +use opcua::server::address_space::VariableBuilder; +use opcua::server::node_manager::memory::SimpleNodeManager; +use opcua::server::SubscriptionCache; use rand::distributions::Alphanumeric; use rand::Rng; -use opcua::server::prelude::*; - -pub fn add_scalar_variables(server: &mut Server, ns: u16) { - let (static_folder_id, dynamic_folder_id) = { - let address_space = server.address_space(); +use opcua::types::*; + +pub fn add_scalar_variables( + manager: Arc, + subscriptions: Arc, + ns: u16, +) { + let static_folder_id = NodeId::new(ns, "static"); + let dynamic_folder_id = NodeId::new(ns, "dynamic"); + { + let address_space = manager.address_space(); let mut address_space = address_space.write(); - ( - address_space - .add_folder("Static", "Static", &NodeId::objects_folder_id()) - .unwrap(), - address_space - .add_folder("Dynamic", "Dynamic", &NodeId::objects_folder_id()) - .unwrap(), - ) + address_space.add_folder( + &static_folder_id, + 
"Static", + "Static", + &NodeId::objects_folder_id(), + ); + address_space.add_folder( + &dynamic_folder_id, + "Dynamic", + "Dynamic", + &NodeId::objects_folder_id(), + ); }; // Add static scalar values - add_static_scalar_variables(server, ns, &static_folder_id); - add_static_array_variables(server, ns, &static_folder_id); + add_static_scalar_variables(&manager, ns, &static_folder_id); + add_static_array_variables(&manager, ns, &static_folder_id); // Add dynamically changing scalar values - add_dynamic_scalar_variables(server, ns, &dynamic_folder_id); - add_dynamic_array_variables(server, ns, &dynamic_folder_id); - set_dynamic_timers(server, ns); + add_dynamic_scalar_variables(&manager, ns, &dynamic_folder_id); + add_dynamic_array_variables(&manager, ns, &dynamic_folder_id); + set_dynamic_timers(manager, ns, subscriptions); } const SCALAR_TYPES: [DataTypeId; 14] = [ @@ -167,15 +182,14 @@ pub fn scalar_random_value(id: DataTypeId) -> Variant { } /// Creates some sample variables, and some push / pull examples that update them -fn add_static_scalar_variables(server: &mut Server, ns: u16, static_folder_id: &NodeId) { +fn add_static_scalar_variables(manager: &SimpleNodeManager, ns: u16, static_folder_id: &NodeId) { // The address space is guarded so obtain a lock to change it - let address_space = server.address_space(); + let address_space = manager.address_space(); let mut address_space = address_space.write(); // Create a folder under static folder - let folder_id = address_space - .add_folder("Scalar", "Scalar", static_folder_id) - .unwrap(); + let scalar_folder_id = NodeId::new(ns, "static_scalar"); + address_space.add_folder(&scalar_folder_id, "Scalar", "Scalar", &static_folder_id); for sn in SCALAR_TYPES.iter() { let name = scalar_name(*sn); @@ -183,21 +197,20 @@ fn add_static_scalar_variables(server: &mut Server, ns: u16, static_folder_id: & VariableBuilder::new(&node_id, name, name) .data_type(sn) .value(scalar_default_value(*sn)) - .organized_by(&folder_id) 
+ .organized_by(&scalar_folder_id) .writable() .insert(&mut address_space); } } -fn add_static_array_variables(server: &mut Server, ns: u16, static_folder_id: &NodeId) { +fn add_static_array_variables(manager: &SimpleNodeManager, ns: u16, static_folder_id: &NodeId) { // The address space is guarded so obtain a lock to change it - let address_space = server.address_space(); + let address_space = manager.address_space(); let mut address_space = address_space.write(); // Create a folder under static folder - let folder_id = address_space - .add_folder("Array", "Array", static_folder_id) - .unwrap(); + let array_folder_id = NodeId::new(ns, "static_array"); + address_space.add_folder(&array_folder_id, "Array", "Array", &static_folder_id); SCALAR_TYPES.iter().for_each(|sn| { let node_id = scalar_node_id(ns, *sn, false, true); @@ -211,21 +224,20 @@ fn add_static_array_variables(server: &mut Server, ns: u16, static_folder_id: &N .data_type(*sn) .value_rank(1) .value((value_type, values)) - .organized_by(&folder_id) + .organized_by(&array_folder_id) .writable() .insert(&mut address_space); }); } -fn add_dynamic_scalar_variables(server: &mut Server, ns: u16, dynamic_folder_id: &NodeId) { +fn add_dynamic_scalar_variables(manager: &SimpleNodeManager, ns: u16, dynamic_folder_id: &NodeId) { // The address space is guarded so obtain a lock to change it - let address_space = server.address_space(); + let address_space = manager.address_space(); let mut address_space = address_space.write(); // Create a folder under static folder - let folder_id = address_space - .add_folder("Scalar", "Scalar", dynamic_folder_id) - .unwrap(); + let scalar_folder_id = NodeId::new(ns, "dynamic_scalar"); + address_space.add_folder(&scalar_folder_id, "Scalar", "Scalar", &dynamic_folder_id); SCALAR_TYPES.iter().for_each(|sn| { let node_id = scalar_node_id(ns, *sn, true, false); @@ -233,20 +245,19 @@ fn add_dynamic_scalar_variables(server: &mut Server, ns: u16, dynamic_folder_id: 
VariableBuilder::new(&node_id, name, name) .data_type(*sn) .value(scalar_default_value(*sn)) - .organized_by(&folder_id) + .organized_by(&scalar_folder_id) .insert(&mut address_space); }); } -fn add_dynamic_array_variables(server: &mut Server, ns: u16, dynamic_folder_id: &NodeId) { +fn add_dynamic_array_variables(manager: &SimpleNodeManager, ns: u16, dynamic_folder_id: &NodeId) { // The address space is guarded so obtain a lock to change it - let address_space = server.address_space(); + let address_space = manager.address_space(); let mut address_space = address_space.write(); // Create a folder under static folder - let folder_id = address_space - .add_folder("Array", "Array", dynamic_folder_id) - .unwrap(); + let array_folder_id = NodeId::new(ns, "dynamic_array"); + address_space.add_folder(&array_folder_id, "Array", "Array", &dynamic_folder_id); SCALAR_TYPES.iter().for_each(|sn| { let node_id = scalar_node_id(ns, *sn, true, true); @@ -259,72 +270,99 @@ fn add_dynamic_array_variables(server: &mut Server, ns: u16, dynamic_folder_id: .data_type(*sn) .value_rank(1) .value((value_type, values)) - .organized_by(&folder_id) + .organized_by(&array_folder_id) .insert(&mut address_space); }); } -fn set_dynamic_timers(server: &mut Server, ns: u16) { - let address_space = server.address_space(); - +fn set_dynamic_timers( + manager: Arc, + ns: u16, + subscriptions: Arc, +) { // Standard change timers - server.add_polling_action(250, move || { - let mut address_space = address_space.write(); - // Scalar - let now = DateTime::now(); - SCALAR_TYPES.iter().for_each(|sn| { - let node_id = scalar_node_id(ns, *sn, true, false); - let _ = address_space.set_variable_value_by_ref( - &node_id, - scalar_random_value(*sn), - &now, - &now, - ); - - let node_id = scalar_node_id(ns, *sn, true, true); - let values = (0..10) - .map(|_| scalar_random_value(*sn)) - .collect::>(); - let value_type = values.first().unwrap().type_id(); - let _ = - address_space.set_variable_value_by_ref(&node_id, 
(value_type, values), &now, &now); - }); + tokio::task::spawn(async move { + let mut interval = tokio::time::interval(std::time::Duration::from_millis(250)); + + loop { + interval.tick().await; + + let now = DateTime::now(); + for sn in SCALAR_TYPES { + let sc_node_id = scalar_node_id(ns, sn, true, false); + let scalar_val = DataValue::new_at(scalar_random_value(sn), now); + + let arr_node_id = scalar_node_id(ns, sn, true, true); + let arr = (0..10).map(|_| scalar_random_value(sn)).collect::>(); + let type_id = arr[0].type_id(); + let array_val = DataValue::new_at(Array::new(type_id, arr).unwrap(), now); + + manager + .set_values( + &subscriptions, + [ + (&sc_node_id, None, scalar_val), + (&arr_node_id, None, array_val), + ] + .into_iter(), + ) + .unwrap(); + } + } }); } -pub fn add_stress_variables(server: &mut Server, ns: u16) { +pub fn add_stress_variables( + manager: Arc, + subscriptions: Arc, + ns: u16, +) { let node_ids = (0..1000) .map(|i| NodeId::new(ns, format!("v{:04}", i))) .collect::>(); - let address_space = server.address_space(); - let mut address_space = address_space.write(); + { + let address_space = manager.address_space(); + let mut address_space = address_space.write(); - let folder_id = address_space - .add_folder("Stress", "Stress", &NodeId::objects_folder_id()) - .unwrap(); + let folder_id = NodeId::new(ns, "stress"); + address_space.add_folder(&folder_id, "Stress", "Stress", &NodeId::objects_folder_id()); - node_ids.iter().enumerate().for_each(|(i, node_id)| { - let name = format!("v{:04}", i); - VariableBuilder::new(node_id, &name, &name) - .data_type(DataTypeId::Int32) - .value(0i32) - .organized_by(&folder_id) - .insert(&mut address_space); - }); + node_ids.iter().enumerate().for_each(|(i, node_id)| { + let name = format!("v{:04}", i); + VariableBuilder::new(&node_id, &name, &name) + .data_type(DataTypeId::Int32) + .value(0i32) + .organized_by(&folder_id) + .insert(&mut address_space); + }); + } - set_stress_timer(server, node_ids); + 
set_stress_timer(manager, subscriptions, node_ids); } -fn set_stress_timer(server: &mut Server, node_ids: Vec) { - let address_space = server.address_space(); - server.add_polling_action(100, move || { - let mut rng = rand::thread_rng(); - let mut address_space = address_space.write(); - let now = DateTime::now(); - node_ids.iter().for_each(|node_id| { - let value: Variant = rng.gen::().into(); - let _ = address_space.set_variable_value_by_ref(node_id, value, &now, &now); - }); +fn set_stress_timer( + manager: Arc, + subscriptions: Arc, + node_ids: Vec, +) { + // Update 1000 variables. + // Note that for large servers you will typically want to avoid using the simple node manager in this way, + // instead using callbacks. + tokio::task::spawn(async move { + let mut interval = tokio::time::interval(std::time::Duration::from_millis(100)); + loop { + interval.tick().await; + let mut rng = rand::thread_rng(); + let now = DateTime::now(); + manager + .set_values( + &subscriptions, + node_ids + .iter() + .map(|id| (id, None, DataValue::new_at(rng.gen::(), now))), + ) + .unwrap(); + } }); } diff --git a/samples/discovery-client/src/main.rs b/samples/discovery-client/src/main.rs index 01be91dd6..0c74ffe17 100644 --- a/samples/discovery-client/src/main.rs +++ b/samples/discovery-client/src/main.rs @@ -55,7 +55,7 @@ async fn main() -> Result<(), ()> { opcua::console_logging::init(); // The client API has a simple `find_servers` function that connects and returns servers for us. 
- let mut client = Client::new(ClientConfig::new("DiscoveryClient", "urn:DiscoveryClient")); + let client = Client::new(ClientConfig::new("DiscoveryClient", "urn:DiscoveryClient")); match client.find_servers(url).await { Ok(servers) => { println!("Discovery server responded with {} servers:", servers.len()); diff --git a/samples/server.conf b/samples/server.conf index ffac355aa..f7b66ba4e 100644 --- a/samples/server.conf +++ b/samples/server.conf @@ -16,9 +16,9 @@ tcp_config: limits: clients_can_modify_address_space: false max_subscriptions: 100 - max_monitored_items_per_sub: 1000 + max_monitored_items_per_sub: 0 max_monitored_item_queue_size: 10 - max_array_length: 1000 + max_array_length: 100000 max_string_length: 65535 max_byte_string_length: 65535 min_sampling_interval: 0.1 diff --git a/samples/simple-client/src/main.rs b/samples/simple-client/src/main.rs index 639a6effa..9e518a3b7 100644 --- a/samples/simple-client/src/main.rs +++ b/samples/simple-client/src/main.rs @@ -45,7 +45,7 @@ Usage: } } -const DEFAULT_URL: &str = "opc.tcp://localhost:4855"; +const DEFAULT_URL: &str = "opc.tcp://127.0.0.1:4855"; #[tokio::main] async fn main() -> Result<(), ()> { @@ -116,7 +116,7 @@ async fn subscribe_to_variables(session: Arc, ns: u16) -> Result<(), St println!("Created a subscription with id = {}", subscription_id); // Create some monitored items - let items_to_create: Vec = ["v1", "v2", "v3", "v4"] + let items_to_create: Vec = ["v1", "v2", "v3", "v4", "v5"] .iter() .map(|v| NodeId::new(ns, *v).into()) .collect(); diff --git a/samples/simple-server/Cargo.toml b/samples/simple-server/Cargo.toml index 1c176db00..6f982e585 100644 --- a/samples/simple-server/Cargo.toml +++ b/samples/simple-server/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dependencies] chrono = "0.4" log = "0.4" +tokio = { version = "1.38.0", features = ["full"] } [dependencies.opcua] path = "../../lib" diff --git a/samples/simple-server/src/main.rs b/samples/simple-server/src/main.rs index 
2cc624542..8d727637f 100644 --- a/samples/simple-server/src/main.rs +++ b/samples/simple-server/src/main.rs @@ -5,53 +5,77 @@ //! This is a simple server for OPC UA. Our sample creates a server with the default settings //! adds some variables to the address space and the listeners for connections. It also has //! a timer that updates those variables so anything monitoring variables sees the values changing. -use std::path::PathBuf; +use std::sync::atomic::{AtomicBool, AtomicI32, Ordering}; use std::sync::Arc; - -use opcua::server::prelude::*; -use opcua::sync::Mutex; - -fn main() { +use std::time::{Duration, Instant}; + +use opcua::server::address_space::Variable; +use opcua::server::node_manager::memory::{ + simple_node_manager, InMemoryNodeManager, NamespaceMetadata, SimpleNodeManager, + SimpleNodeManagerImpl, +}; +use opcua::server::{ServerBuilder, SubscriptionCache}; +use opcua::types::{DataValue, NodeId, UAString}; + +#[tokio::main] +async fn main() { // This enables logging via env_logger & log crate macros. If you don't need logging or want // to implement your own, omit this line. 
opcua::console_logging::init(); // Create an OPC UA server with sample configuration and default node set - let mut server = Server::new(ServerConfig::load(&PathBuf::from("../server.conf")).unwrap()); - let ns = { - let address_space = server.address_space(); - let mut address_space = address_space.write(); - address_space - .register_namespace("urn:simple-server") - .unwrap() - }; + let (server, handle) = ServerBuilder::new() + .with_config_from("../server.conf") + .with_node_manager(simple_node_manager( + NamespaceMetadata { + namespace_uri: "urn:SimpleServer".to_owned(), + ..Default::default() + }, + "simple", + )) + .build() + .unwrap(); + let node_manager = handle + .node_managers() + .get_of_type::() + .unwrap(); + let ns = handle.get_namespace_index("urn:SimpleServer").unwrap(); // Add some variables of our own - add_example_variables(&mut server, ns); + add_example_variables(ns, node_manager, handle.subscriptions().clone()); // Run the server. This does not ordinarily exit so you must Ctrl+C to terminate - server.run(); + server.run().await.unwrap(); } /// Creates some sample variables, and some push / pull examples that update them -fn add_example_variables(server: &mut Server, ns: u16) { +fn add_example_variables( + ns: u16, + manager: Arc>, + subscriptions: Arc, +) { // These will be the node ids of the new variables let v1_node = NodeId::new(ns, "v1"); let v2_node = NodeId::new(ns, "v2"); let v3_node = NodeId::new(ns, "v3"); let v4_node = NodeId::new(ns, "v4"); + let v5_node = NodeId::new(ns, "v5"); - let address_space = server.address_space(); + let address_space = manager.address_space(); // The address space is guarded so obtain a lock to change it { let mut address_space = address_space.write(); // Create a sample folder under objects folder - let sample_folder_id = address_space - .add_folder("Sample", "Sample", &NodeId::objects_folder_id()) - .unwrap(); + let sample_folder_id = NodeId::new(ns, "folder"); + address_space.add_folder( + 
&sample_folder_id, + "Sample", + "Sample", + &NodeId::objects_folder_id(), + ); // Add some variables to our sample folder. Values will be overwritten by the timer let _ = address_space.add_variables( @@ -60,66 +84,73 @@ fn add_example_variables(server: &mut Server, ns: u16) { Variable::new(&v2_node, "v2", "v2", false), Variable::new(&v3_node, "v3", "v3", UAString::from("")), Variable::new(&v4_node, "v4", "v4", 0f64), + Variable::new(&v5_node, "v5", "v5", "Static Value"), ], &sample_folder_id, ); } - // OPC UA for Rust allows you to push or pull values from a variable so here are examples - // of each method. - - // 1) Pull. This code will add getters to v3 & v4 that returns their values by calling - // function. + // Depending on your choice of node manager, you can use different methods to provide the value of a node. + // The simple node manager lets you set dynamic getters: { - let address_space = server.address_space(); - let mut address_space = address_space.write(); - if let Some(ref mut v) = address_space.find_variable_mut(v3_node.clone()) { - // Hello world's counter will increment with each get - slower interval == slower increment - let mut counter = 0; - let getter = AttrFnGetter::new( - move |_, _, _, _, _, _| -> Result, StatusCode> { - counter += 1; - Ok(Some(DataValue::new_now(UAString::from(format!( - "Hello World times {}", - counter - ))))) - }, - ); - v.set_value_getter(Arc::new(Mutex::new(getter))); - } - - if let Some(ref mut v) = address_space.find_variable_mut(v4_node.clone()) { - // Sine wave draws 2*PI over course of 10 seconds - use chrono::Utc; - use std::f64::consts; - let start_time = Utc::now(); - let getter = AttrFnGetter::new( - move |_, _, _, _, _, _| -> Result, StatusCode> { - let elapsed = Utc::now() - .signed_duration_since(start_time) - .num_milliseconds(); - let moment = (elapsed % 10000) as f64 / 10000.0; - Ok(Some(DataValue::new_now((2.0 * consts::PI * moment).sin()))) - }, - ); - 
v.set_value_getter(Arc::new(Mutex::new(getter))); - } + let counter = AtomicI32::new(0); + manager + .inner() + .add_read_callback(v3_node.clone(), move |_, _, _| { + Ok(DataValue::new_now(UAString::from(format!( + "Hello World times {}", + counter.fetch_add(1, Ordering::Relaxed) + )))) + }); + + let start_time = Instant::now(); + manager + .inner() + .add_read_callback(v4_node.clone(), move |_, _, _| { + let elapsed = (Instant::now() - start_time).as_millis(); + let moment = (elapsed % 10_000) as f64 / 10_000.0; + Ok(DataValue::new_now( + (2.0 * std::f64::consts::PI * moment).sin(), + )) + }); } - // 2) Push. This code will use a timer to set the values on variable v1 & v2 on an interval. - // Note you can use any kind of timer callback that you like for this. The library - // contains a simple add_polling_action for your convenience. + // Alternatively, you can set the value in the node manager on a timer. + // This is typically a better choice if updates are relatively rare, and you always know when + // an update occurs. Fundamentally, the server is event-driven. When using a getter like above, + // the node manager will sample the value if a user subscribes to it. When publishing a value like below, + // clients will only be notified when a change actually happens, but we will need to store each new value. + + // Typically, you will use a getter or a custom node manager for dynamic values, and direct modification for + // properties or other less-commonly changing values. 
{ // Store a counter and a flag in a tuple - let data = Arc::new(Mutex::new((0, true))); - server.add_polling_action(300, move || { - let mut data = data.lock(); - data.0 += 1; - data.1 = !data.1; - let mut address_space = address_space.write(); - let now = DateTime::now(); - let _ = address_space.set_variable_value(v1_node.clone(), data.0, &now, &now); - let _ = address_space.set_variable_value(v2_node.clone(), data.1, &now, &now); + let counter = AtomicI32::new(0); + let flag = AtomicBool::new(false); + tokio::task::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_millis(300)); + loop { + interval.tick().await; + + manager + .set_values( + &subscriptions, + [ + ( + &v1_node, + None, + DataValue::new_now(counter.fetch_add(1, Ordering::Relaxed)), + ), + ( + &v2_node, + None, + DataValue::new_now(flag.fetch_xor(true, Ordering::Relaxed)), + ), + ] + .into_iter(), + ) + .unwrap(); + } }); } }