From cbae5d968d30f3cb950fbfa27c6e3aaf85d9f45a Mon Sep 17 00:00:00 2001 From: puffyCid <16283453+puffyCid@users.noreply.github.com> Date: Wed, 27 Sep 2023 06:09:57 +0000 Subject: [PATCH] Server uploads (#71) --- .../unreleased/Added-20230919-004918.yaml | 3 + .../Dependencies-20230919-003807.yaml | 3 + Cargo.lock | 145 ++++++----- server/Cargo.toml | 3 +- server/src/artifacts/enrollment.rs | 4 +- server/src/artifacts/jobs.rs | 35 ++- server/src/db/endpoints.rs | 227 ------------------ server/src/db/error.rs | 36 --- server/src/db/jobs.rs | 128 ---------- server/src/db/tables.rs | 227 ------------------ server/src/enrollment/enroll.rs | 95 ++++---- server/src/enrollment/uris.rs | 26 +- server/src/filestore/endpoints.rs | 123 ++++++++++ server/src/filestore/error.rs | 28 +++ server/src/filestore/jobs.rs | 213 ++++++++++++++++ server/src/{db => filestore}/mod.rs | 1 - server/src/lib.rs | 3 +- server/src/routes.rs | 33 ++- server/src/server.rs | 40 +-- server/src/socket/command.rs | 127 ++++++++++ server/src/socket/heartbeat.rs | 66 ++++- server/src/socket/mod.rs | 1 + server/src/socket/uris.rs | 25 +- server/src/socket/websocket.rs | 207 ++++++++++++---- server/src/uploads/mod.rs | 2 + server/src/uploads/upload.rs | 199 +++++++++++++++ server/src/uploads/uris.rs | 53 ++++ server/src/utils/config.rs | 45 +++- server/src/utils/error.rs | 4 +- server/src/utils/filesystem.rs | 126 ++++++++-- .../enroll.json | 8 + .../heartbeat.jsonl | 10 + .../jobs.json | 15 ++ server/tests/test_data/jobs.redb | Bin 1081344 -> 1589248 bytes 34 files changed, 1347 insertions(+), 914 deletions(-) create mode 100644 .changes/unreleased/Added-20230919-004918.yaml create mode 100644 .changes/unreleased/Dependencies-20230919-003807.yaml delete mode 100644 server/src/db/endpoints.rs delete mode 100644 server/src/db/error.rs delete mode 100644 server/src/db/jobs.rs delete mode 100644 server/src/db/tables.rs create mode 100644 server/src/filestore/endpoints.rs create mode 100644 server/src/filestore/error.rs create mode 100644 server/src/filestore/jobs.rs rename server/src/{db => filestore}/mod.rs (71%) create mode 100644 server/src/socket/command.rs create mode 100644 server/src/uploads/mod.rs create mode 100644 server/src/uploads/upload.rs create mode 100644 server/src/uploads/uris.rs create mode 100644 server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/enroll.json create mode 100644 server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/heartbeat.jsonl create mode 100644 server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/jobs.json diff --git a/.changes/unreleased/Added-20230919-004918.yaml b/.changes/unreleased/Added-20230919-004918.yaml new file mode 100644 index 00000000..4a31d859 --- /dev/null +++ b/.changes/unreleased/Added-20230919-004918.yaml @@ -0,0 +1,3 @@ +kind: Added +body: Server upload support for compressed jsonl data. Also more async code. 
+time: 2023-09-19T00:49:18.961877-04:00 diff --git a/.changes/unreleased/Dependencies-20230919-003807.yaml b/.changes/unreleased/Dependencies-20230919-003807.yaml new file mode 100644 index 00000000..bf50e5e7 --- /dev/null +++ b/.changes/unreleased/Dependencies-20230919-003807.yaml @@ -0,0 +1,3 @@ +kind: Dependencies +body: Removed redb +time: 2023-09-19T00:38:07.949505-04:00 diff --git a/Cargo.lock b/Cargo.lock index aca231ef..2de0b2da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.5" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" +checksum = "0f2135563fb5c609d2b2b87c1e8ce7bc41b0b45430fa9661f457981503dd5bf0" dependencies = [ "memchr", ] @@ -342,7 +342,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -376,6 +376,7 @@ dependencies = [ "matchit", "memchr", "mime", + "multer", "percent-encoding", "pin-project-lite", "rustversion", @@ -523,9 +524,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "bytecount" @@ -611,9 +612,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ "android-tzdata", "iana-time-zone", @@ -653,9 +654,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.3" +version = "4.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" +checksum = "b1d7b8d5ec32af0fadc644bf1fd509a688c2103b185644bb1e29d164e0703136" dependencies = [ "clap_builder", "clap_derive", @@ -663,9 +664,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.2" +version = "4.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "5179bb514e4d7c2051749d8fcefa2ed6d06a9f4e6d69faf3805f5d80b8cf8d56" dependencies = [ "anstream", "anstyle", @@ -682,7 +683,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -858,9 +859,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.65+curl-8.2.1" +version = "0.4.66+curl-8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961ba061c9ef2fe34bbd12b807152d96f0badd2bebe7b90ce6c8c8b7572a0986" +checksum = "70c44a72e830f0e40ad90dda8a6ab6ed6314d39776599a58a2e5e37fbc6db5b9" dependencies = [ "cc", "libc", @@ -869,7 +870,7 @@ dependencies = [ "openssl-sys", "pkg-config", "vcpkg", - "winapi", + "windows-sys", ] [[package]] @@ -905,7 +906,7 @@ checksum = "3c65c2ffdafc1564565200967edc4851c7b55422d3913466688907efd05ea26f" dependencies = [ "deno-proc-macro-rules-macros", "proc-macro2", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -917,7 +918,7 @@ 
dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -963,7 +964,7 @@ dependencies = [ "regex", "strum", "strum_macros 0.25.2", - "syn 2.0.32", + "syn 2.0.37", "thiserror", ] @@ -1047,7 +1048,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -1152,7 +1153,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -1372,7 +1373,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -1867,9 +1868,9 @@ checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libnghttp2-sys" @@ -2043,6 +2044,24 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "multer" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "memchr", + "mime", + "spin 0.9.8", + "version_check", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -2226,7 +2245,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2346,7 +2365,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2417,7 +2436,7 @@ checksum = "52a40bc70c2c58040d2d8b167ba9a5ff59fc9dab7ad44771cfde3dcfde7a09c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -2460,9 +2479,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ "unicode-ident", ] @@ -2478,16 +2497,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "pyo3-build-config" -version = "0.19.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076c73d0bc438f7a4ef6fdd0c3bb4732149136abd952b110ac93e4edb13a6ba5" -dependencies = [ - "once_cell", - "target-lexicon", -] - [[package]] name = "quick-xml" version = "0.23.1" @@ -2595,16 +2604,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "redb" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d37f4ea12036ad04bd2a02ce385bf28e56f97344223952dccaacbf8704823e" -dependencies = [ - "libc", - "pyo3-build-config", -] - [[package]] name = "redox_syscall" version = "0.2.16" @@ -2715,7 +2714,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi", @@ -2931,14 +2930,14 @@ checksum = 
"4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] name = "serde_json" -version = "1.0.106" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ "indexmap 2.0.0", "itoa", @@ -3008,10 +3007,10 @@ name = "server" version = "0.1.0" dependencies = [ "axum", + "flate2", "futures", "hyper", "log", - "redb", "serde", "serde_json", "tokio", @@ -3190,6 +3189,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3247,7 +3252,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3269,9 +3274,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" +version = "2.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" dependencies = [ "proc-macro2", "quote", @@ -3299,12 +3304,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "target-lexicon" -version = "0.12.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" - [[package]] name = "tempfile" version = "3.8.0" @@ -3375,7 +3374,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3469,7 +3468,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3602,7 +3601,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", ] [[package]] @@ -3661,9 +3660,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicase" @@ -3682,15 +3681,15 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-id" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d70b6494226b36008c8366c288d77190b3fad2eb4c10533139c1c1f461127f1a" +checksum = "b1b6def86329695390197b82c1e244a54a131ceb66c996f2088a3876e2ae083f" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -3828,7 +3827,7 
@@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", "wasm-bindgen-shared", ] @@ -3862,7 +3861,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/server/Cargo.toml b/server/Cargo.toml index 5366ee8c..f745e569 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -17,6 +17,7 @@ axum = { version = "0.6.20", default-features = false, features = [ "form", "query", "ws", + "multipart", ] } tokio = { version = "1.32.0", features = ["full"] } log = "0.4.20" @@ -25,7 +26,7 @@ serde_json = "1.0.106" uuid = { version = "1.4.1", features = ["v4"] } toml = "0.8.0" futures = "0.3.28" -redb = { version = "1.1.0", default-features = false } +flate2 = "1.0.27" [dev-dependencies] tower = { version = "0.4.13", features = ["util"] } diff --git a/server/src/artifacts/enrollment.rs b/server/src/artifacts/enrollment.rs index 42fb69ce..99eb03e4 100644 --- a/server/src/artifacts/enrollment.rs +++ b/server/src/artifacts/enrollment.rs @@ -20,12 +20,14 @@ pub(crate) struct Endpoint { } #[derive(Debug, Deserialize, Serialize)] -pub(crate) struct EndpointDb { +/// Static data about an endpoint that rarely changes +pub(crate) struct EndpointStatic { pub(crate) hostname: String, pub(crate) platform: String, pub(crate) tags: Vec<String>, pub(crate) notes: Vec<String>, pub(crate) checkin: u64, + pub(crate) id: String, } #[derive(Debug, Deserialize, Serialize)] diff --git a/server/src/artifacts/jobs.rs b/server/src/artifacts/jobs.rs index 67d848b1..f572160a 100644 --- a/server/src/artifacts/jobs.rs +++ b/server/src/artifacts/jobs.rs @@ -1,6 +1,16 @@ +use std::collections::HashSet; + use serde::{Deserialize, Serialize}; -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, Clone)] +pub(crate) struct Command { + /**Unique list of endpoint IDs */ + pub(crate) targets: HashSet<String>, + /**Job to send to the targets */ + pub(crate) job: JobInfo, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] pub(crate) struct JobInfo { pub(crate) id: u64, pub(crate) name: String, @@ -13,12 +23,33 @@ pub(crate) struct JobInfo { pub(crate) status: Status, /**Base64 encoded TOML collection */ pub(crate) collection: String, + /**When endpoint should start job */ + pub(crate) start_time: u64, + /**How long job should run */ + pub(crate) duration: u64, + pub(crate) action: Action, + pub(crate) job_type: JobType, } -#[derive(Debug, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] pub(crate) enum Status { NotStarted, Started, Finished, Failed, + Cancelled, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub(crate) enum Action { + Start, + Stop, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub(crate) enum JobType { + Collection, + Processes, + Filelist, + Script, } diff --git a/server/src/db/endpoints.rs b/server/src/db/endpoints.rs deleted file mode 100644 index 726f3b82..00000000 --- a/server/src/db/endpoints.rs +++ /dev/null @@ -1,227 +0,0 @@ -use super::{ - error::DbError, - tables::{add_table_data, lookup_table_data}, -}; -use crate::{ - artifacts::{ - enrollment::{EndpointDb, EndpointInfo}, - sockets::{Heartbeat, Pulse}, - }, - utils::time::time_now, -}; -use log::error; -use redb::Database; - -/// Enroll endpoint into the `EndpointDB` -pub(crate) fn enroll_endpointdb( - endpoint: &EndpointInfo, - id: &str, - db: &Database, -) -> Result<(),
DbError> { - let data = EndpointDb { - hostname: endpoint.hostname.clone(), - platform: endpoint.platform.clone(), - tags: Vec::new(), - notes: Vec::new(), - checkin: time_now(), - }; - - let serde_result = serde_json::to_vec(&data); - let value = match serde_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to serialize endpoint data into DB table: {err:?}"); - return Err(DbError::Serialize); - } - }; - - let result = add_table_data(db, id, &value, "endpoints"); - if result.is_err() { - return Err(DbError::EndpointDb); - } - - Ok(()) -} - -/// Update heartbeat information table -pub(crate) fn update_heartbeat( - heartbeat: &Heartbeat, - id: &str, - db: &Database, -) -> Result<(), DbError> { - let serde_result = serde_json::to_vec(heartbeat); - let value = match serde_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to serialize heartbeat data into DB table: {err:?}"); - return Err(DbError::Serialize); - } - }; - - let result = add_table_data(db, id, &value, "heartbeat"); - if result.is_err() { - return Err(DbError::EndpointDb); - } - - Ok(()) -} - -/// Update pulse information table -pub(crate) fn update_pulse(pulse: &Pulse, id: &str, db: &Database) -> Result<(), DbError> { - let serde_result = serde_json::to_vec(pulse); - let value = match serde_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to serialize pulse data into DB table: {err:?}"); - return Err(DbError::Serialize); - } - }; - - let result = add_table_data(db, id, &value, "pulse"); - if result.is_err() { - return Err(DbError::EndpointDb); - } - - Ok(()) -} - -/// Lookup an endpoint ID in the `EndpointDB` -pub(crate) fn lookup_endpoint(db: &Database, id: &str) -> Result<(bool, EndpointDb), DbError> { - let value = lookup_table_data("endpoints", id, db)?; - if value.is_empty() { - let empty = EndpointDb { - hostname: String::new(), - platform: String::new(), - tags: Vec::new(), - notes: Vec::new(), - checkin: 0, - }; - - return Ok((false, empty)); - } - - let serde_value = serde_json::from_slice(&value); - let db_value: EndpointDb = match serde_value { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to deserialize endpoint data: {err:?}"); - return Err(DbError::Deserialize); - } - }; - - Ok((true, db_value)) -} - -#[cfg(test)] -mod tests { - use super::{enroll_endpointdb, update_heartbeat, update_pulse}; - use crate::{ - artifacts::{ - enrollment::EndpointInfo, - sockets::{Heartbeat, Pulse}, - systeminfo::Memory, - }, - db::{endpoints::lookup_endpoint, tables::setup_db}, - utils::filesystem::create_dirs, - }; - use std::path::PathBuf; - - #[test] - fn test_enroll_endpointdb() { - create_dirs("./tmp").unwrap(); - let path = "./tmp/endpoints.redb"; - - let id = "arandomkey"; - let data = EndpointInfo { - boot_time: 1111, - hostname: String::from("hello"), - os_version: String::from("12.1"), - uptime: 100, - kernel_version: String::from("12.11"), - platform: String::from("linux"), - cpu: Vec::new(), - disks: Vec::new(), - memory: Memory { - available_memory: 111, - free_memory: 111, - free_swap: 111, - total_memory: 111, - total_swap: 111, - used_memory: 111, - used_swap: 111, - }, - }; - - let db = setup_db(path).unwrap(); - - enroll_endpointdb(&data, &id, &db).unwrap(); - } - - #[test] - fn test_update_heartbeat() { - create_dirs("./tmp").unwrap(); - let path = "./tmp/endpointsbeat.redb"; - let id = "arandomkey"; - let data = Heartbeat { - endpoint_id: id.to_string(), - heartbeat: true, - jobs_running: 0, - timestamp: 10, - boot_time: 
1111, - hostname: String::from("hello"), - os_version: String::from("12.1"), - uptime: 100, - kernel_version: String::from("12.11"), - platform: String::from("linux"), - cpu: Vec::new(), - disks: Vec::new(), - memory: Memory { - available_memory: 111, - free_memory: 111, - free_swap: 111, - total_memory: 111, - total_swap: 111, - used_memory: 111, - used_swap: 111, - }, - }; - - let db = setup_db(path).unwrap(); - - update_heartbeat(&data, &id, &db).unwrap(); - } - - #[test] - fn test_update_pulse() { - create_dirs("./tmp").unwrap(); - let path = "./tmp/endpointspulse.redb"; - let id = "arandomkey"; - let data = Pulse { - endpoint_id: id.to_string(), - pulse: true, - jobs_running: 0, - timestamp: 10, - }; - - let db = setup_db(path).unwrap(); - - update_pulse(&data, &id, &db).unwrap(); - } - - #[test] - fn test_lookup_endpoint() { - let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_location.push("tests/test_data/endpoints.redb"); - let path = test_location.display().to_string(); - - let id = "3482136c-3176-4272-9bd7-b79f025307d6"; - let db = setup_db(&path).unwrap(); - - let (found, value) = lookup_endpoint(&db, id).unwrap(); - assert!(found); - - assert_eq!(value.hostname, "aStudio.lan"); - assert_eq!(value.platform, "Darwin"); - assert_eq!(value.checkin, 1693968058); - } -} diff --git a/server/src/db/error.rs b/server/src/db/error.rs deleted file mode 100644 index a932f046..00000000 --- a/server/src/db/error.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::fmt; - -#[derive(Debug)] -pub enum DbError { - EndpointDb, - JobDb, - NoDb, - BeginRead, - OpenTable, - Get, - Open, - BeginWrite, - Insert, - Serialize, - Deserialize, - Commit, -} - -impl fmt::Display for DbError { - fn fmt<'a>(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - DbError::EndpointDb => write!(f, "Could not add endpoint to DB"), - DbError::JobDb => write!(f, "Could not add job to DB"), - DbError::NoDb => write!(f, "No DB file to open"), - DbError::BeginRead => write!(f, "Could not begin DB read"), - DbError::BeginWrite => write!(f, "Could not begin DB write"), - DbError::OpenTable => write!(f, "Could not open table"), - DbError::Get => write!(f, "Could not get DB value"), - DbError::Insert => write!(f, "Could not insert DB value"), - DbError::Open => write!(f, "Could not open DB"), - DbError::Serialize => write!(f, "Could not serialize DB data"), - DbError::Deserialize => write!(f, "Could not deserialize DB data"), - DbError::Commit => write!(f, "Could not commit DB data"), - } - } -} diff --git a/server/src/db/jobs.rs b/server/src/db/jobs.rs deleted file mode 100644 index 3658aba0..00000000 --- a/server/src/db/jobs.rs +++ /dev/null @@ -1,128 +0,0 @@ -use super::{ - error::DbError, - tables::{check_write, lookup_table_data, write_table_data}, -}; -use crate::artifacts::jobs::JobInfo; -use log::error; -use redb::Database; - -/// Add a collection Job into the `JobDB` -pub(crate) fn add_job(id: &str, job: JobInfo, db: &Database) -> Result<(), DbError> { - // Before writing data. Always check and get existing data first. So we dont overwrite any jobs. 
`write_data` locks the db - let (write_data, existing_data) = check_write(db, id, "jobs")?; - - let mut jobs = Vec::new(); - if !existing_data.is_empty() { - let jobs_result = serde_json::from_slice(&existing_data); - let mut old_jobs: Vec<JobInfo> = match jobs_result { - Ok(result) => result, - Err(err) => { - error!("[server] Could not deserialize old jobs: {err:?}"); - return Err(DbError::Deserialize); - } - }; - - jobs.append(&mut old_jobs); - } - - jobs.push(job); - - let serde_result = serde_json::to_vec(&jobs); - let value = match serde_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to serialize job data into DB table: {err:?}"); - return Err(DbError::Serialize); - } - }; - - let result = write_table_data(write_data, id, &value, "jobs"); - if result.is_err() { - return Err(DbError::JobDb); - } - - Ok(()) -} - -/// Get all jobs associated with endpoint ID -pub(crate) fn get_jobs(id: &str, db: &Database) -> Result<Vec<JobInfo>, DbError> { - let value = lookup_table_data("jobs", id, db)?; - if value.is_empty() { - return Ok(Vec::new()); - } - - let serde_value = serde_json::from_slice(&value); - let jobs: Vec<JobInfo> = match serde_value { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to deserialize endpoint data: {err:?}"); - return Err(DbError::Deserialize); - } - }; - - Ok(jobs) -} - -#[cfg(test)] -mod tests { - use super::{add_job, get_jobs}; - use crate::{ - artifacts::jobs::{JobInfo, Status}, - db::tables::setup_db, - utils::filesystem::create_dirs, - }; - use std::path::PathBuf; - - #[test] - fn test_add_job() { - create_dirs("./tmp").unwrap(); - let path = "./tmp/jobs.redb"; - let id = "jobkey"; - let data = JobInfo { - id: 0, - name: String::from("randomjob"), - created: 10, - started: 0, - finished: 0, - status: Status::NotStarted, - collection: String::from("c3lzdGVtID0gIndpbmRvd3MiCgpbb3V0cHV0XQpuYW1lID0gInByZWZldGNoX2NvbGxlY3Rpb24iCmRpcmVjdG9yeSA9ICIuL3RtcCIKZm9ybWF0ID0gImpzb24iCmNvbXByZXNzID0gZmFsc2UKZW5kcG9pbnRfaWQgPSAiNmM1MWIxMjMtMTUyMi00NTcyLTlmMmEtMGJkNWFiZDgxYjgyIgpjb2xsZWN0aW9uX2lkID0gMQpvdXRwdXQgPSAibG9jYWwiCgpbW2FydGlmYWN0c11dCmFydGlmYWN0X25hbWUgPSAicHJlZmV0Y2giClthcnRpZmFjdHMucHJlZmV0Y2hdCmFsdF9kcml2ZSA9ICdDJwo="), - }; - - let db = setup_db(path).unwrap(); - - add_job(&id, data, &db).unwrap(); - } - - #[test] - fn test_get_jobs() { - create_dirs("./tmp").unwrap(); - let path = "./tmp/jobsnofound.redb"; - let id = "1cacf8ac-c98d-45cb-a69b-166338aabe9a"; - let db = setup_db(path).unwrap(); - - let jobs = get_jobs(&id, &db).unwrap(); - assert!(jobs.is_empty()); - } - - #[test] - fn test_get_jobs_id() { - let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_location.push("tests/test_data/jobs.redb"); - let path = test_location.display().to_string(); - - let id = "1cacf8ac-c98d-45cb-a69b-166338aabe9a"; - let db = setup_db(&path).unwrap(); - - let jobs = get_jobs(id, &db).unwrap(); - - assert!(!jobs.is_empty()); - - assert_eq!(jobs[0].collection, "c3lzdGVtID0gIndpbmRvd3MiCgpbb3V0cHV0XQpuYW1lID0gInByZWZldGNoX2NvbGxlY3Rpb24iCmRpcmVjdG9yeSA9ICIuL3RtcCIKZm9ybWF0ID0gImpzb24iCmNvbXByZXNzID0gZmFsc2UKZW5kcG9pbnRfaWQgPSAiNmM1MWIxMjMtMTUyMi00NTcyLTlmMmEtMGJkNWFiZDgxYjgyIgpjb2xsZWN0aW9uX2lkID0gMQpvdXRwdXQgPSAibG9jYWwiCgpbW2FydGlmYWN0c11dCmFydGlmYWN0X25hbWUgPSAicHJlZmV0Y2giClthcnRpZmFjdHMucHJlZmV0Y2hdCmFsdF9kcml2ZSA9ICdDJwo="); - assert_eq!(jobs[0].started, 0); - assert_eq!(jobs[0].finished, 0); - assert_eq!(jobs[0].status, Status::NotStarted); - assert_eq!(jobs[0].created, 10); - assert_eq!(jobs[0].name, "randomjob"); -
assert_eq!(jobs[0].id, 0); - } -} diff --git a/server/src/db/tables.rs b/server/src/db/tables.rs deleted file mode 100644 index d38d4c23..00000000 --- a/server/src/db/tables.rs +++ /dev/null @@ -1,227 +0,0 @@ -use super::error::DbError; -use crate::utils::filesystem::is_file; -use log::{error, warn}; -use redb::{Database, DatabaseError, ReadableTable, TableDefinition, TableError, WriteTransaction}; -use std::{sync::Arc, thread::sleep, time::Duration}; - -/** - * Before writing to database we may want to first get existing data before overwriting. This function locks the database. - * If you **don't** care about existing data use `add_table_data` - */ -pub(crate) fn check_write<'a>( - db: &'a Database, - id: &str, - table_name: &str, -) -> Result<(WriteTransaction<'a>, Vec<u8>), DbError> { - let begin_result = db.begin_write(); - let write_start = match begin_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to start {table_name} DB write: {err:?}"); - return Err(DbError::BeginWrite); - } - }; - - let data = lookup_table_data(table_name, id, db)?; - Ok((write_start, data)) -} - -/// Add data to a provide table and path -pub(crate) fn add_table_data( - db: &Database, - id: &str, - data: &[u8], - table_name: &str, -) -> Result<(), DbError> { - let begin_result = db.begin_write(); - let write_start = match begin_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to start {table_name} DB write: {err:?}"); - return Err(DbError::BeginWrite); - } - }; - - write_table_data(write_start, id, data, table_name) -} - -/// Write data to a provided table name -pub(crate) fn write_table_data( - write_start: WriteTransaction<'_>, - id: &str, - data: &[u8], - table_name: &str, -) -> Result<(), DbError> { - let table: TableDefinition<'_, &str, &[u8]> = TableDefinition::new(table_name); - - // Open the table for writing - { - let table_result = write_start.open_table(table); - let mut table_write = match table_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to open {table_name} DB table for writing: {err:?}"); - return Err(DbError::OpenTable); - } - }; - - let result = table_write.insert(id, data); - match result { - Ok(_) => {} - Err(err) => { - error!("[server] Failed to insert data into {table_name} DB table: {err:?}"); - return Err(DbError::Insert); - } - } - } - - let commit_result = write_start.commit(); - if commit_result.is_err() { - error!( - "[server] Failed to commit data into {table_name} DB table: {:?}", - commit_result.unwrap_err() - ); - return Err(DbError::Commit); - } - - Ok(()) -} - -/// Get table data at path based on ID.
Empty data means no value was found -pub(crate) fn lookup_table_data( - table_name: &str, - id: &str, - db: &Database, -) -> Result<Vec<u8>, DbError> { - let table: TableDefinition<'_, &str, &[u8]> = TableDefinition::new(table_name); - - let begin_result = db.begin_read(); - let read_start = match begin_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to start {table_name} DB read: {err:?}"); - return Err(DbError::BeginRead); - } - }; - - let table_result = read_start.open_table(table); - let table_read = match table_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to open {table_name} DB table for reading: {err:?}"); - if let TableError::TableDoesNotExist(_) = err { - // If table does not exist yet thats ok just return empty data - return Ok(Vec::new()); - } - return Err(DbError::OpenTable); - } - }; - - let read_result = table_read.get(id); - let data_value = match read_result { - Ok(result) => result, - Err(err) => { - error!("[server] Failed to get {table_name} DB data: {err:?}"); - return Err(DbError::Get); - } - }; - - if let Some(value) = data_value { - let db_data = value.value(); - return Ok(db_data.to_vec()); - } - - Ok(Vec::new()) -} - -/// Open a database file at provided path -pub(crate) fn setup_db(path: &str) -> Result<Arc<Database>, DbError> { - let db_result = if !is_file(path) { - Database::create(path) - } else { - Database::open(path) - }; - - let db = match db_result { - Ok(result) => result, - Err(err) => { - // Open errors should only occur during tests. When the server is running the Database is opened and should be shared via axum::State - if let DatabaseError::DatabaseAlreadyOpen = err { - let sleep_time = 2; - warn!("[server] {path} already opened. Sleeping {sleep_time} millisecond(s)"); - sleep(Duration::from_millis(sleep_time)); - return setup_db(path); - } else { - println!("[server] Failed to open {path} DB: {err:?}"); - return Err(DbError::Open); - } - } - }; - - Ok(Arc::new(db)) -} - -#[cfg(test)] -mod tests { - use super::{add_table_data, check_write, lookup_table_data, setup_db, write_table_data}; - use crate::artifacts::enrollment::EndpointDb; - use std::path::PathBuf; - - #[test] - fn test_add_table_data() { - let path = "./tmp/endpointsadd.redb"; - - let db = setup_db(path).unwrap(); - let id = "arandomkey"; - - add_table_data(&db, id, &[1, 2, 3, 4], "endpoints").unwrap(); - } - - #[test] - fn test_check_write() { - let path = "./tmp/jobscheck.redb"; - - let db = setup_db(path).unwrap(); - let id = "arandomkey"; - - check_write(&db, id, "jobs").unwrap(); - } - - #[test] - fn test_write_table_data() { - let path = "./tmp/jobswrite.redb"; - - let db = setup_db(path).unwrap(); - let id = "arandomkey"; - - let (write_start, _) = check_write(&db, id, "jobs").unwrap(); - - write_table_data(write_start, id, &[1, 2, 3, 4, 5], "jobs").unwrap(); - } - - #[test] - fn test_lookup_table_data() { - let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_location.push("tests/test_data/endpoints.redb"); - let path = test_location.display().to_string(); - - let id = "3482136c-3176-4272-9bd7-b79f025307d6"; - let db = setup_db(&path).unwrap(); - - let result = lookup_table_data("endpoints", id, &db).unwrap(); - let endpoint_serde: EndpointDb = serde_json::from_slice(&result).unwrap(); - - assert_eq!(endpoint_serde.hostname, "aStudio.lan"); - assert_eq!(endpoint_serde.platform, "Darwin"); - assert_eq!(endpoint_serde.checkin, 1693968058); - } - - #[test] - fn test_setup_db() { - let mut test_location =
PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_location.push("tests/test_data/endpoints.redb"); - let path = test_location.display().to_string(); - - let _ = setup_db(&path).unwrap(); - } -} diff --git a/server/src/enrollment/enroll.rs b/server/src/enrollment/enroll.rs index 15a3f825..299c3dae 100644 --- a/server/src/enrollment/enroll.rs +++ b/server/src/enrollment/enroll.rs @@ -1,13 +1,12 @@ use crate::{ artifacts::enrollment::{Endpoint, EndpointInfo}, - db::endpoints::{enroll_endpointdb, lookup_endpoint}, + filestore::endpoints::create_endpoint_path, server::ServerState, - utils::uuid::generate_uuid, + utils::filesystem::is_directory, }; use axum::Json; use axum::{extract::State, http::StatusCode}; use log::error; -use redb::Database; use serde::{Deserialize, Serialize}; use serde_json::Error; @@ -36,9 +35,17 @@ pub(crate) async fn enroll_endpoint( return Err(StatusCode::BAD_REQUEST); } - let endpoint_id = generate_uuid(); + let id_result = + create_endpoint_path(&state.config.endpoint_server.storage, &data.endpoint_info).await; + + let endpoint_id = match id_result { + Ok(result) => result, + Err(err) => { + error!("[server] Could not create enrollment storage directory: {err:?}"); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + }; - let _ = enroll_endpointdb(&data.endpoint_info, &endpoint_id, &state.endpoint_db); let enrolled = Enrolled { endpoint_id, client_config: String::new(), @@ -48,28 +55,24 @@ } /// Verify the provided `endpoint_id` is registered with artemis. Based on path to storage directory -pub(crate) fn verify_enrollment(data: &str, ip: &str, db: &Database) -> bool { +pub(crate) fn verify_enrollment(data: &str, ip: &str, path: &str) -> Result<(), StatusCode> { let verify_result: Result<Endpoint, Error> = serde_json::from_str(data); let verify = match verify_result { Ok(result) => result, Err(err) => { error!("[server] Failed to deserialize endpoint verification from {ip}: {err:?}"); - return false; + return Err(StatusCode::BAD_REQUEST); } }; - let value_result = lookup_endpoint(db, &verify.endpoint_id); - let (found, _) = match value_result { - Ok(result) => result, - Err(err) => { - error!( - "[server] Could not lookup {} in endpoints db: {err:?}", - verify.endpoint_id - ); - return false; - } - }; - found + let endpoint_path = format!("{path}/{}", verify.endpoint_id); + + let status = is_directory(&endpoint_path); + if !status { + return Err(StatusCode::UNPROCESSABLE_ENTITY); + } + + Ok(()) } #[cfg(test)] @@ -77,13 +80,13 @@ mod tests { use super::verify_enrollment; use crate::{ artifacts::{enrollment::EndpointInfo, systeminfo::Memory}, - db::tables::setup_db, enrollment::enroll::{enroll_endpoint, Enrollment}, server::ServerState, - utils::config::read_config, + utils::{config::read_config, filesystem::create_dirs}, }; use axum::{extract::State, Json}; - use std::path::PathBuf; + use std::{collections::HashMap, path::PathBuf, sync::Arc}; + use tokio::sync::RwLock; #[tokio::test] async fn test_enroll_endpoint() { @@ -112,22 +115,15 @@ mod tests { let test = Json(info); let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); + create_dirs("./tmp").await.unwrap(); - let config = read_config(&test_location.display().to_string()).unwrap(); - let endpointdb = setup_db(&format!( - "{}/endpoints.redb", - &config.endpoint_server.storage - )) - .unwrap(); + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); - let jobdb = setup_db(&format!("{}/jobs.redb",
&config.endpoint_server.storage)).unwrap(); - - let state_server = ServerState { - config, - endpoint_db: endpointdb, - job_db: jobdb, - }; - let test2 = State(state_server); + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; + let test2 = State(server_state); let result = enroll_endpoint(test2, test).await.unwrap(); assert!(!result.endpoint_id.is_empty()) @@ -161,22 +157,15 @@ mod tests { let test = Json(info); let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); + create_dirs("./tmp").await.unwrap(); - let config = read_config(&test_location.display().to_string()).unwrap(); - let endpointdb = setup_db(&format!( - "{}/endpoints.redb", - &config.endpoint_server.storage - )) - .unwrap(); + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); - let jobdb = setup_db(&format!("{}/jobs.redb", &config.endpoint_server.storage)).unwrap(); - - let state_server = ServerState { - config, - endpoint_db: endpointdb, - job_db: jobdb, - }; - let test2 = State(state_server); + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; + let test2 = State(server_state); let result = enroll_endpoint(test2, test).await.unwrap(); assert!(!result.endpoint_id.is_empty()) @@ -188,11 +177,9 @@ let ip = "127.0.0.1"; let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_location.push("tests/test_data/endpoints.redb"); + test_location.push("tests/test_data"); let path = test_location.display().to_string(); - let db = setup_db(&path).unwrap(); - let result = verify_enrollment(data, ip, &db); - assert!(result) + verify_enrollment(data, ip, &path).unwrap(); } } diff --git a/server/src/enrollment/uris.rs b/server/src/enrollment/uris.rs index c9651ac0..1ee65757 100644 --- a/server/src/enrollment/uris.rs +++ b/server/src/enrollment/uris.rs @@ -10,13 +10,14 @@ pub(crate) fn enroll_routes(base: &str) -> Router<ServerState> { #[cfg(test)] mod tests { use super::enroll_routes; - use crate::{db::tables::setup_db, server::ServerState, utils::config::read_config}; + use crate::{server::ServerState, utils::config::read_config}; use axum::{ body::Body, http::{Method, Request, StatusCode}, }; - use std::path::PathBuf; - use tower::util::ServiceExt; + use std::{collections::HashMap, path::PathBuf, sync::Arc}; + use tokio::sync::RwLock; + use tower::ServiceExt; #[tokio::test] async fn test_enroll_routes() { @@ -25,22 +26,15 @@ let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); - let config = read_config(&test_location.display().to_string()).unwrap(); - let endpointdb = setup_db(&format!( - "{}/endpoints.redb", - &config.endpoint_server.storage - )) - .unwrap(); + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); - let jobdb = setup_db(&format!("{}/jobs.redb", &config.endpoint_server.storage)).unwrap(); + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; - let state_server = ServerState { - config, - endpoint_db: endpointdb, - job_db: jobdb, - }; let res = route - .with_state(state_server) + .with_state(server_state) .oneshot( Request::builder() .method(Method::POST) diff --git a/server/src/filestore/endpoints.rs b/server/src/filestore/endpoints.rs new file mode 100644 index 00000000..2280b66f --- /dev/null +++ b/server/src/filestore/endpoints.rs @@
-0,0 +1,123 @@ +use crate::{ + artifacts::enrollment::{EndpointInfo, EndpointStatic}, + filestore::error::StoreError, + utils::{ + filesystem::{create_dirs, write_file}, + time::time_now, + uuid::generate_uuid, + }, +}; +use log::error; + +/// Create the endpoint storage directory and generate an ID +pub(crate) async fn create_endpoint_path( + path: &str, + endpoint: &EndpointInfo, +) -> Result<String, StoreError> { + let id = generate_uuid(); + + let data = EndpointStatic { + hostname: endpoint.hostname.clone(), + platform: endpoint.platform.clone(), + tags: Vec::new(), + notes: Vec::new(), + checkin: time_now(), + id, + }; + + let serde_result = serde_json::to_vec(&data); + let value = match serde_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to serialize enrollment: {err:?}"); + return Err(StoreError::Serialize); + } + }; + + let endpoint_path = format!("{path}/{}", data.id); + + let status = create_dirs(&endpoint_path).await; + if status.is_err() { + error!( + "[server] Failed to create endpoint storage directory: {:?}", + status.unwrap_err() + ); + return Err(StoreError::CreateDirectory); + } + + let enroll_file = format!("{endpoint_path}/enroll.json"); + let jobs_file = format!("{endpoint_path}/jobs.json"); + let heartbeat_file = format!("{endpoint_path}/heartbeat.jsonl"); + let pulse_file = format!("{endpoint_path}/pulse.json"); + + create_enroll_file(&enroll_file, &value).await?; + create_enroll_file(&jobs_file, &[]).await?; + create_enroll_file(&heartbeat_file, &[]).await?; + create_enroll_file(&pulse_file, &[]).await?; + + Ok(data.id) +} + +/// Create enrollment related files +async fn create_enroll_file(path: &str, data: &[u8]) -> Result<(), StoreError> { + let status = write_file(data, path, false).await; + if status.is_err() { + error!( + "[server] Failed to write endpoint enrollment file {path}: {:?}", + status.unwrap_err() + ); + return Err(StoreError::WriteFile); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::{create_endpoint_path, create_enroll_file}; + use crate::{ + artifacts::{enrollment::EndpointInfo, systeminfo::Memory}, + utils::filesystem::create_dirs, + }; + + #[tokio::test] + async fn test_create_endpoint_path() { + let path = "./tmp"; + let data = EndpointInfo { + boot_time: 1111, + hostname: String::from("hello"), + os_version: String::from("12.1"), + uptime: 100, + kernel_version: String::from("12.11"), + platform: String::from("linux"), + cpu: Vec::new(), + disks: Vec::new(), + memory: Memory { + available_memory: 111, + free_memory: 111, + free_swap: 111, + total_memory: 111, + total_swap: 111, + used_memory: 111, + used_swap: 111, + }, + }; + + let result = create_endpoint_path(path, &data).await.unwrap(); + assert!(!result.is_empty()); + } + + #[tokio::test] + async fn test_create_enroll_file() { + create_dirs("./tmp").await.unwrap(); + let test = "./tmp/test.json"; + create_enroll_file(&test, b"hello").await.unwrap(); + } + + #[tokio::test] + #[should_panic(expected = "WriteFile")] + async fn test_create_enroll_file_bad() { + let test = "."; + create_enroll_file(&test, b"hello").await.unwrap(); + } +}
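For reference, enrollment now leaves a small directory tree on disk instead of a redb table: each enrolled endpoint gets <storage>/<endpoint-id>/ holding enroll.json, jobs.json, heartbeat.jsonl, and pulse.json. A minimal sketch (not part of this patch) of reading the enroll.json metadata back, assuming the serde/serde_json/tokio dependencies above; the storage root and endpoint ID are illustrative:

use serde::Deserialize;

// Mirrors the EndpointStatic struct added in artifacts/enrollment.rs
#[derive(Deserialize)]
struct EndpointStatic {
    hostname: String,
    platform: String,
    tags: Vec<String>,
    notes: Vec<String>,
    checkin: u64,
    id: String,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical storage root and endpoint ID
    let path = "./tmp/3482136c-3176-4272-9bd7-b79f025307d6/enroll.json";
    let data = tokio::fs::read(path).await?;
    let info: EndpointStatic = serde_json::from_slice(&data)?;
    println!("{} ({}) enrolled at {}", info.hostname, info.platform, info.checkin);
    Ok(())
}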
diff --git a/server/src/filestore/error.rs b/server/src/filestore/error.rs new file mode 100644 index 00000000..652c47ca --- /dev/null +++ b/server/src/filestore/error.rs @@ -0,0 +1,28 @@ +use std::fmt; + +#[derive(Debug)] +pub enum StoreError { + Endpoint, + Job, + NoFile, + CreateDirectory, + WriteFile, + ReadFile, + Serialize, + Deserialize, +} + +impl fmt::Display for StoreError { + fn fmt<'a>(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + StoreError::Endpoint => write!(f, "Could not add endpoint to filestore"), + StoreError::Job => write!(f, "Could not add job to filestore"), + StoreError::NoFile => write!(f, "No file to open"), + StoreError::CreateDirectory => write!(f, "Could not create endpoint directory"), + StoreError::WriteFile => write!(f, "Could not write file"), + StoreError::ReadFile => write!(f, "Could not read file"), + StoreError::Serialize => write!(f, "Could not serialize filestore data"), + StoreError::Deserialize => write!(f, "Could not deserialize filestore data"), + } + } +}
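The jobs store that follows keeps every job for an endpoint in a single jobs.json file as a map keyed by job ID (save_job assigns id = jobs.len() + 1). A rough sketch of the resulting layout, with placeholder values and the base64 collection elided:

use std::collections::HashMap;
use serde_json::json;

fn main() {
    // serde_json serializes HashMap<u64, JobInfo> with string keys,
    // so jobs.json is an object keyed by job ID rather than an array
    let mut jobs = HashMap::new();
    jobs.insert(1u64, json!({
        "id": 1, "name": "randomjob", "created": 10, "started": 0,
        "finished": 0, "status": "NotStarted", "collection": "...",
        "start_time": 0, "duration": 0, "action": "Start",
        "job_type": "Collection"
    }));
    println!("{}", serde_json::to_string(&jobs).unwrap());
}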
diff --git a/server/src/filestore/jobs.rs b/server/src/filestore/jobs.rs new file mode 100644 index 00000000..992b4ce5 --- /dev/null +++ b/server/src/filestore/jobs.rs @@ -0,0 +1,213 @@ +use super::error::StoreError; +use crate::{ + artifacts::jobs::JobInfo, + utils::filesystem::{is_file, read_file, write_file}, +}; +use log::error; +use std::collections::HashMap; + +/** + * Save `JobInfo` to endpoint `jobs.json` file. + * Path is full path to endpoint **including** the endpoint ID + * + * Only `JobType::Collection` entries are written to disk. Otherwise the Job is sent directly to endpoint + */ +pub(crate) async fn save_job(mut job: JobInfo, path: &str) -> Result<(), StoreError> { + let job_file = format!("{path}/jobs.json"); + let mut jobs = HashMap::new(); + + if is_file(&job_file) { + let jobs_result = read_file(&job_file).await; + let job_data = match jobs_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to read jobs file for endpoint at {path}: {err:?}"); + return Err(StoreError::ReadFile); + } + }; + + if !job_data.is_empty() { + let jobs_result = serde_json::from_slice(&job_data); + let existing_jobs: HashMap<u64, JobInfo> = match jobs_result { + Ok(result) => result, + Err(err) => { + error!("[server] Could not deserialize existing jobs: {err:?}"); + return Err(StoreError::Deserialize); + } + }; + jobs = existing_jobs; + } + } + + job.id = jobs.len() as u64 + 1; + jobs.insert(job.id, job); + + let serde_result = serde_json::to_vec(&jobs); + let value = match serde_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to serialize job data: {err:?}"); + return Err(StoreError::Serialize); + } + }; + + let status = write_file(&value, &job_file, false).await; + if status.is_err() { + error!("[server] Could not write jobs file"); + return Err(StoreError::WriteFile); + } + + Ok(()) +} + +/// Return all Jobs for endpoint. Path is full path to endpoint **including** the endpoint ID +pub(crate) async fn get_jobs(path: &str) -> Result<HashMap<u64, JobInfo>, StoreError> { + let job_file = format!("{path}/jobs.json"); + + if !is_file(&job_file) { + return Ok(HashMap::new()); + } + + let value_result = read_file(&job_file).await; + let value = match value_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to read {job_file}: {err:?}"); + return Err(StoreError::ReadFile); + } + }; + + let serde_value = serde_json::from_slice(&value); + let jobs: HashMap<u64, JobInfo> = match serde_value { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to deserialize job data: {err:?}"); + return Err(StoreError::Deserialize); + } + }; + + Ok(jobs) +} + +/** + * Update `JobInfo` at endpoint `jobs.json` file. + * Path is full path to endpoint **including** the endpoint ID + * + * Jobs are only updated by the endpoint + */ +pub(crate) async fn update_job(job: JobInfo, path: &str) -> Result<(), StoreError> { + let job_file = format!("{path}/jobs.json"); + let mut jobs = HashMap::new(); + + if is_file(&job_file) { + let jobs_result = read_file(&job_file).await; + let job_data = match jobs_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to read jobs file for endpoint at {path} for updating: {err:?}"); + return Err(StoreError::ReadFile); + } + }; + + let jobs_result = serde_json::from_slice(&job_data); + let existing_jobs: HashMap<u64, JobInfo> = match jobs_result { + Ok(result) => result, + Err(err) => { + error!("[server] Could not deserialize existing jobs for updating: {err:?}"); + return Err(StoreError::Deserialize); + } + }; + jobs = existing_jobs; + } + + jobs.insert(job.id, job); + + let serde_result = serde_json::to_vec(&jobs); + let value = match serde_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to serialize job data for update: {err:?}"); + return Err(StoreError::Serialize); + } + }; + + let status = write_file(&value, &job_file, false).await; + if status.is_err() { + error!("[server] Could not update jobs file"); + return Err(StoreError::WriteFile); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::artifacts::jobs::{Action, JobInfo, JobType, Status}; + use crate::filestore::jobs::{get_jobs, save_job, update_job}; + use crate::utils::filesystem::create_dirs; + use std::path::PathBuf; + + #[tokio::test] + async fn test_save_job() { + create_dirs("./tmp/save").await.unwrap(); + let path = "./tmp/save"; + let data = JobInfo { + id: 0, + name: String::from("randomjob"), + created: 10, + started: 0, + finished: 0, + status: Status::NotStarted, + duration: 0, + start_time: 0, + action: Action::Start, + job_type: JobType::Collection, + collection: String::from("c3lzdGVtID0gIndpbmRvd3MiCgpbb3V0cHV0XQpuYW1lID0gInByZWZldGNoX2NvbGxlY3Rpb24iCmRpcmVjdG9yeSA9ICIuL3RtcCIKZm9ybWF0ID0gImpzb24iCmNvbXByZXNzID0gZmFsc2UKZW5kcG9pbnRfaWQgPSAiNmM1MWIxMjMtMTUyMi00NTcyLTlmMmEtMGJkNWFiZDgxYjgyIgpjb2xsZWN0aW9uX2lkID0gMQpvdXRwdXQgPSAibG9jYWwiCgpbW2FydGlmYWN0c11dCmFydGlmYWN0X25hbWUgPSAicHJlZmV0Y2giClthcnRpZmFjdHMucHJlZmV0Y2hdCmFsdF9kcml2ZSA9ICdDJwo="), + }; + + save_job(data, &path).await.unwrap(); + } + + #[tokio::test] + async fn test_get_jobs() { + let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + test_location.push("tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6"); + + let result = get_jobs(&test_location.display().to_string()) + .await + .unwrap(); + + assert_eq!(result.get(&1).unwrap().id, 1); + assert_eq!(result.get(&1).unwrap().name, "randomjob"); + assert_eq!(result.get(&1).unwrap().created, 10); + assert_eq!(result.get(&1).unwrap().started, 0); + assert_eq!(result.get(&1).unwrap().finished, 0); + assert_eq!(result.get(&1).unwrap().status, Status::NotStarted); + assert_eq!(result.get(&1).unwrap().collection.len(), 372); + } + + #[tokio::test] + async fn test_update_job() { + create_dirs("./tmp").await.unwrap(); + + let path = "./tmp"; + + let mut data = JobInfo { + id: 0, + name: String::from("randomjob"), + created: 10, + started: 0, + finished: 0, + status: Status::NotStarted, + duration: 0, + start_time: 0, + action: Action::Start, + job_type: JobType::Collection, + collection:
String::from("c3lzdGVtID0gIndpbmRvd3MiCgpbb3V0cHV0XQpuYW1lID0gInByZWZldGNoX2NvbGxlY3Rpb24iCmRpcmVjdG9yeSA9ICIuL3RtcCIKZm9ybWF0ID0gImpzb24iCmNvbXByZXNzID0gZmFsc2UKZW5kcG9pbnRfaWQgPSAiNmM1MWIxMjMtMTUyMi00NTcyLTlmMmEtMGJkNWFiZDgxYjgyIgpjb2xsZWN0aW9uX2lkID0gMQpvdXRwdXQgPSAibG9jYWwiCgpbW2FydGlmYWN0c11dCmFydGlmYWN0X25hbWUgPSAicHJlZmV0Y2giClthcnRpZmFjdHMucHJlZmV0Y2hdCmFsdF9kcml2ZSA9ICdDJwo="), + }; + + save_job(data.clone(), path).await.unwrap(); + data.status = Status::Finished; + update_job(data, path).await.unwrap(); + } +} diff --git a/server/src/db/mod.rs b/server/src/filestore/mod.rs similarity index 71% rename from server/src/db/mod.rs rename to server/src/filestore/mod.rs index 4888216d..46cfdbd8 100644 --- a/server/src/db/mod.rs +++ b/server/src/filestore/mod.rs @@ -1,4 +1,3 @@ pub(crate) mod endpoints; mod error; pub(crate) mod jobs; -pub(crate) mod tables; diff --git a/server/src/lib.rs b/server/src/lib.rs index debd77f1..139c3699 100644 --- a/server/src/lib.rs +++ b/server/src/lib.rs @@ -74,9 +74,10 @@ )] mod artifacts; -mod db; mod enrollment; +mod filestore; mod routes; pub mod server; mod socket; +mod uploads; mod utils; diff --git a/server/src/routes.rs b/server/src/routes.rs index cb0af47e..43730391 100644 --- a/server/src/routes.rs +++ b/server/src/routes.rs @@ -1,4 +1,7 @@ -use crate::{enrollment::uris::enroll_routes, server::ServerState, socket::uris::socket_routes}; +use crate::{ + enrollment::uris::enroll_routes, server::ServerState, socket::uris::socket_routes, + uploads::uris::upload_routes, +}; use axum::{routing::get, Router}; pub(crate) fn setup_routes() -> Router { @@ -11,43 +14,37 @@ pub(crate) fn setup_routes() -> Router { app = app.merge(enroll_routes(&base)); app = app.merge(socket_routes(&base)); + app = app.merge(upload_routes(&base)); app } #[cfg(test)] mod tests { use super::setup_routes; - use crate::{db::tables::setup_db, server::ServerState, utils::config::read_config}; + use crate::{server::ServerState, utils::config::read_config}; use axum::{ body::Body, http::{Request, StatusCode}, }; - use std::path::PathBuf; - use tower::util::ServiceExt; + use std::{collections::HashMap, path::PathBuf, sync::Arc}; + use tokio::sync::RwLock; + use tower::ServiceExt; #[tokio::test] async fn test_setup_routes() { let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); - let result = read_config(&test_location.display().to_string()).unwrap(); - let endpointdb = setup_db(&format!( - "{}/endpoints.redb", - &result.endpoint_server.storage - )) - .unwrap(); - - let jobdb = setup_db(&format!("{}/jobs.redb", &result.endpoint_server.storage)).unwrap(); + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); - let state_server = ServerState { - config: result, - endpoint_db: endpointdb, - job_db: jobdb, - }; + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; let app = setup_routes(); let res = app - .with_state(state_server) + .with_state(server_state) .oneshot(Request::builder().uri("/").body(Body::empty()).unwrap()) .await .unwrap(); diff --git a/server/src/server.rs b/server/src/server.rs index 44857f36..376736d4 100644 --- a/server/src/server.rs +++ b/server/src/server.rs @@ -1,28 +1,28 @@ use crate::{ - db::tables::setup_db, routes, utils::{ config::{read_config, ArtemisConfig}, filesystem::create_dirs, }, }; +use axum::extract::ws::Message; use log::error; -use redb::Database; use std::{ + collections::HashMap, net::{IpAddr, 
Ipv4Addr, SocketAddr}, sync::Arc, }; +use tokio::sync::{mpsc, RwLock}; #[derive(Debug, Clone)] pub(crate) struct ServerState { pub(crate) config: ArtemisConfig, - pub(crate) endpoint_db: Arc<Database>, - pub(crate) job_db: Arc<Database>, + pub(crate) command: Arc<RwLock<HashMap<String, mpsc::Sender<Message>>>>, } #[tokio::main] pub async fn start(path: &str) { - let config_result = read_config(path); + let config_result = read_config(path).await; let config = match config_result { Ok(result) => result, Err(err) => { @@ -31,26 +31,17 @@ pub async fn start(path: &str) { } }; - let dir_result = create_dirs(&config.endpoint_server.storage); + let dir_result = create_dirs(&config.endpoint_server.storage).await; if dir_result.is_err() { error!("[server] Failed to start artemis server. Could not create storage directory",); return; } - let endpoint_db = setup_state(&format!( - "{}/endpoints.redb", - config.endpoint_server.storage - )); - let job_db = setup_state(&format!("{}/jobs.redb", config.endpoint_server.storage)); - - let server_state = ServerState { - config, - endpoint_db, - job_db, - }; + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; let app = routes::setup_routes().with_state(server_state); - let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8000); + let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000); let status = axum::Server::bind(&address) .serve(app.into_make_service_with_connect_info::<SocketAddr>()) .await; @@ -64,15 +55,9 @@ } } -/// Setup the server state for `axum::State`. If we cannot setup the databases then we cannot start the server -fn setup_state(path: &str) -> Arc<Database> { - setup_db(path) - .unwrap_or_else(|_| unreachable!("Could not setup database at {path}. Cannot start server")) -} - #[cfg(test)] mod tests { - use super::{setup_state, start}; + use super::start; use std::path::PathBuf; #[test] @@ -83,9 +68,4 @@ let config_path = test_location.display().to_string(); start(&config_path) } - - #[test] - fn test_setup_state() { - let _ = setup_state("./tmp/endpoints.redb"); - } }
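The new ServerState keeps one mpsc sender per connected endpoint so the server can push jobs to live websocket sessions. A minimal sketch (not from this patch; names are illustrative) of how a websocket task could register its channel in the shared map that quick_jobs below reads from:

use axum::extract::ws::Message;
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{mpsc, RwLock};

// Matches the ServerState.command field type above
type CommandMap = Arc<RwLock<HashMap<String, mpsc::Sender<Message>>>>;

// Hypothetical helper: register a per-endpoint channel and return the
// receiver half for the task that writes messages to the socket
async fn register_endpoint(command: &CommandMap, endpoint_id: &str) -> mpsc::Receiver<Message> {
    let (sender, receiver) = mpsc::channel(5);
    // Hold the write lock only long enough to insert the sender
    command.write().await.insert(endpoint_id.to_string(), sender);
    receiver
}

#[tokio::main]
async fn main() {
    let command: CommandMap = Arc::new(RwLock::new(HashMap::new()));
    let mut receiver = register_endpoint(&command, "0998b365-b60d-4c0c-a629-f631afa83d2c").await;
    // A socket task would forward anything received here to the endpoint
    tokio::spawn(async move { while let Some(_msg) = receiver.recv().await {} });
}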
/// Send jobs to client endpoints from server. The job data is pushed over websockets +pub(crate) async fn quick_jobs( + data: &str, + channels: &RwLockReadGuard<'_, HashMap>>, +) -> Result<(), Error> { + let command_result: Result = serde_json::from_str(data); + let command = match command_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to deserialize command: {err:?}"); + return Err(err); + } + }; + + // Cannot send collection jobs through websockets. These are picked up when the client connects via heartbeat or pulse + if command.job.job_type == JobType::Collection { + return Ok(()); + } + + // Loop through target endpoint IDs that should receive the job + for target in command.targets { + // Check if the target endpoint ID is found in the HashMap + if let Some(sender) = channels.get(&target) { + let job_result = serde_json::to_string(&command.job); + let job = match job_result { + Ok(result) => result, + Err(err) => { + error!("[server] Failed to serialize job: {err:?}"); + continue; + } + }; + // Send the job to the async client task. The job will only be sent to the associated endpoint ID + let result = sender.send(Message::Text(job)).await; + if result.is_err() { + error!( + "[server] Could not send quick job command: {:?}", + result.unwrap_err() + ); + } + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::socket::command::{parse_command, quick_jobs}; + use crate::utils::filesystem::create_dirs; + use axum::extract::ws::Message; + use std::collections::HashMap; + use tokio::sync::{mpsc, RwLock}; + + #[tokio::test] + async fn test_parse_command() { + let data = r#"{"targets":["0998b365-b60d-4c0c-a629-f631afa83d2c", "madeup"],"job":{"id":1,"name":"processes","created":10000,"started":10001,"finished":20000,"status":"NotStarted","collection":"adssafasdfsadfs==","duration":10,"start_time":100,"action":"Start","job_type":"Collection"}}"#; + let path = "./tmp"; + create_dirs(path).await.unwrap(); + parse_command(data, path).await.unwrap(); + } + + #[tokio::test] + #[should_panic(expected = "Error")] + async fn test_parse_command_bad_data() { + let data = r#"{"asdfasdf"}"#; + let path = "./tmp"; + parse_command(data, path).await.unwrap(); + } + + #[tokio::test] + async fn test_quick_jobs() { + let data = r#"{"targets":["0998b365-b60d-4c0c-a629-f631afa83d2c"],"job":{"id":1,"name":"processes","created":10000,"started":10001,"finished":20000,"status":"NotStarted","collection":"adssafasdfsadfs==","duration":10,"start_time":100,"action":"Start","job_type":"Processes"}}"#; + let mut test = HashMap::new(); + let (client_send, mut client_recv) = mpsc::channel(5); + test.insert( + String::from("0998b365-b60d-4c0c-a629-f631afa83d2c"), + client_send.clone(), + ); + + let _send_task = tokio::spawn(async move { + while let Some(_msg) = client_recv.recv().await { + client_send + .send(Message::Text(data.to_string())) + .await + .unwrap(); + } + }); + + let rw = RwLock::new(test); + + quick_jobs(data, &rw.read().await).await.unwrap(); + } +} diff --git a/server/src/socket/heartbeat.rs b/server/src/socket/heartbeat.rs index 94f04336..e0f22ca3 100644 --- a/server/src/socket/heartbeat.rs +++ b/server/src/socket/heartbeat.rs @@ -1,9 +1,12 @@ -use crate::artifacts::sockets::{Heartbeat, Pulse}; +use crate::{ + artifacts::sockets::{Heartbeat, Pulse}, + utils::filesystem::{append_file, write_file}, +}; use log::error; use serde_json::Error;
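Heartbeats now land in an append-only `heartbeat.jsonl`, one JSON document per line. A hedged sketch of reading such a file back, assuming only that each line deserializes into a struct carrying an `endpoint_id` field (this reader is not part of the patch):

```rust
use serde::Deserialize;
use std::fs::read_to_string;

// Only the field this sketch needs; the real Heartbeat struct has many more.
#[derive(Deserialize)]
struct BeatLine {
    endpoint_id: String,
}

fn main() -> std::io::Result<()> {
    let raw = read_to_string("heartbeat.jsonl")?;
    // jsonl: each line is an independent JSON document.
    for line in raw.lines() {
        match serde_json::from_str::<BeatLine>(line) {
            Ok(beat) => println!("heartbeat from {}", beat.endpoint_id),
            Err(err) => eprintln!("skipping malformed line: {err}"),
        }
    }
    Ok(())
}
```

/// Parse a heartbeat from a system.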
Heartbeat occurs every 300 seconds -pub(crate) fn parse_heartbeat(data: &str, ip: &str) -> String { +pub(crate) async fn parse_heartbeat(data: &str, ip: &str, endpoint_path: &str) -> String { let beat_result: Result = serde_json::from_str(data); let beat = match beat_result { Ok(result) => result, @@ -12,11 +15,22 @@ pub(crate) fn parse_heartbeat(data: &str, ip: &str) -> String { return String::new(); } }; + + // Heartbeat.json size limit is 10MB + let beat_size_limit = 10485760; + let path = format!("{endpoint_path}/{}/heartbeat.jsonl", beat.endpoint_id); + let status = append_file(data, &path, &beat_size_limit).await; + if status.is_err() { + error!( + "[server] Could not update heartbeat.jsonl file from {ip}: {:?}", + status.unwrap_err() + ); + } beat.endpoint_id } /// Parse a pulse from a system. Pulse occurs every 30 seconds -pub(crate) fn parse_pulse(data: &str, ip: &str) -> String { +pub(crate) async fn parse_pulse(data: &str, ip: &str, endpoint_path: &str) -> String { let pulse_result: Result = serde_json::from_str(data); let pulse = match pulse_result { Ok(result) => result, @@ -25,28 +39,60 @@ pub(crate) fn parse_pulse(data: &str, ip: &str) -> String { return String::new(); } }; + + let path = format!("{endpoint_path}/{}/pulse.json", pulse.endpoint_id); + let status = write_file(data.as_bytes(), &path, false).await; + if status.is_err() { + error!( + "[server] Could not update pulse.json file from {ip}: {:?}", + status.unwrap_err() + ); + } pulse.endpoint_id } #[cfg(test)] mod tests { use super::parse_heartbeat; - use crate::socket::heartbeat::parse_pulse; + use crate::{socket::heartbeat::parse_pulse, utils::filesystem::create_dirs}; - #[test] - fn test_parse_heartbeat() { + #[tokio::test] + async fn test_parse_heartbeat() { let test = r#"{"endpoint_id":"randomkey","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 
,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}}"#; let ip = "127.0.0.1"; - let id = parse_heartbeat(test, ip); + let path = "./tmp"; + create_dirs(path).await.unwrap(); + let id = parse_heartbeat(test, ip, path).await; + assert_eq!(id, "randomkey"); + } + + #[tokio::test] + async fn test_parse_heartbeat_bad_path() { + let test = r#"{"endpoint_id":"randomkey","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}}"#; + let ip = "127.0.0.1"; + let path = "./tmp2"; + let id = parse_heartbeat(test, ip, path).await; + assert_eq!(id, "randomkey"); + } + + #[tokio::test] + async fn test_parse_pulse() { + let test = + r#"{"endpoint_id":"randomkey","pulse":true,"timestamp":1111111,"jobs_running":0}"#; + let path = "./tmp"; + create_dirs(path).await.unwrap(); + let ip = "127.0.0.1"; + let id = parse_pulse(test, ip, path).await; assert_eq!(id, "randomkey"); } - #[test] - fn test_parse_pulse() { + #[tokio::test] + async fn test_parse_pulse_bad_path() { let test = r#"{"endpoint_id":"randomkey","pulse":true,"timestamp":1111111,"jobs_running":0}"#; + let path = "./tmp2"; let ip = "127.0.0.1"; - let id = parse_pulse(test, ip); + let id = parse_pulse(test, ip, path).await; assert_eq!(id, "randomkey"); } } diff --git a/server/src/socket/mod.rs b/server/src/socket/mod.rs index 078c47e0..247dba4b 100644 --- a/server/src/socket/mod.rs +++ b/server/src/socket/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod command; mod heartbeat; pub(crate) mod uris; pub(crate) mod websocket; diff --git a/server/src/socket/uris.rs b/server/src/socket/uris.rs index 
e968d5aa..276c7169 100644 --- a/server/src/socket/uris.rs +++ b/server/src/socket/uris.rs @@ -10,12 +10,13 @@ pub(crate) fn socket_routes(base: &str) -> Router { #[cfg(test)] mod tests { use super::socket_routes; - use crate::{db::tables::setup_db, server::ServerState, utils::config::read_config}; + use crate::{server::ServerState, utils::config::read_config}; use axum::{ body::Body, http::{Method, Request, StatusCode}, }; - use std::path::PathBuf; + use std::{collections::HashMap, path::PathBuf, sync::Arc}; + use tokio::sync::RwLock; use tower::util::ServiceExt; #[tokio::test] @@ -26,23 +27,15 @@ mod tests { let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); - let config = read_config(&test_location.display().to_string()).unwrap(); - let endpointdb = setup_db(&format!( - "{}/endpoints.redb", - &config.endpoint_server.storage - )) - .unwrap(); - - let jobdb = setup_db(&format!("{}/jobs.redb", &config.endpoint_server.storage)).unwrap(); + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); - let state_server = ServerState { - config, - endpoint_db: endpointdb, - job_db: jobdb, - }; + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; let res = route - .with_state(state_server) + .with_state(server_state) .oneshot( Request::builder() .method(Method::GET) diff --git a/server/src/socket/websocket.rs b/server/src/socket/websocket.rs index d0d5c98e..391fe70d 100644 --- a/server/src/socket/websocket.rs +++ b/server/src/socket/websocket.rs @@ -1,87 +1,205 @@ +use super::command::parse_command; use super::heartbeat::{parse_heartbeat, parse_pulse}; -use crate::db::jobs::get_jobs; use crate::enrollment::enroll::verify_enrollment; +use crate::filestore::jobs::get_jobs; use crate::server::ServerState; +use crate::socket::command::quick_jobs; use axum::extract::ws::{Message, WebSocket}; use axum::extract::{ConnectInfo, State, WebSocketUpgrade}; use axum::response::IntoResponse; use futures::{SinkExt, StreamExt}; use log::{error, warn}; -use redb::Database; +use std::collections::HashMap; use std::ops::ControlFlow::Continue; -use std::sync::Arc; use std::{net::SocketAddr, ops::ControlFlow}; +use tokio::sync::mpsc; -/// Accept `Web Sockets` +/// Accept websockets pub(crate) async fn socket_connection( - State(state): State, socket: WebSocketUpgrade, + State(state): State, ConnectInfo(addr): ConnectInfo, ) -> impl IntoResponse { - socket.on_upgrade(move |ws| handle_socket(ws, addr, state.endpoint_db, state.job_db)) + socket.on_upgrade(move |ws| handle_socket(ws, addr, state)) } -/// Process the `Web Socket` -async fn handle_socket( - socket: WebSocket, - addr: SocketAddr, - endpoint_db: Arc, - job_db: Arc, -) { +/// Parse all websocket communications +async fn handle_socket(socket: WebSocket, addr: SocketAddr, state: ServerState) { let (mut sender, mut receiver) = socket.split(); + let storage_path = state.config.endpoint_server.storage.clone(); + + let mut message_source = MessageSource::None; + let mut socket_message = String::new(); + /* + * When a system first connects over websockets we need to determine source of message: + * 1. MessageSource::Client - Socket connection is remote system + * 2. 
MessageSource::Server - Socket connection is the local server + * + * MessageSource::Client will return the endpoint_id as the `socket_message` + * MessageSource::Server will return the entire message as the `socket_message` + */ + while let Some(Ok(message)) = receiver.next().await { + let control = parse_message(&message, &addr, &storage_path).await; + if control.is_break() { + break; + } + + if let Continue((message, source)) = control { + message_source = source; + socket_message = message; + break; + } + } + // If the message source is None, don't set up any async tasks. We only proceed for Client or Server sources + if message_source == MessageSource::None { + warn!("[server] Unexpected message source none"); + return; + } + + /* + * Register new endpoint clients and use the ID to track the channels used to send websocket commands from the server. + * When a client system first checks in, set up an async task and channel. + * This async task is used to communicate with a single client and is stored in shared state (a HashMap) to track individual clients + */ + if message_source == MessageSource::Client + && state.command.read().await.get(&socket_message).is_none() + { + let (client_send, mut client_recv) = mpsc::channel(50); + + let _send_task = tokio::spawn(async move { + while let Some(msg) = client_recv.recv().await { + // If there is any websocket error, break the loop. + let result = sender.send(msg).await; + if result.is_err() { + error!( + "[server] Could not send server message: {:?}", + result.unwrap_err() + ); + break; + } + } + }); + + // Register the sender associated with the endpoint client. Tracked via endpoint ID + state + .command + .write() + .await + .insert(socket_message.clone(), client_send); + } - let _receive_task = tokio::spawn(async move { + /* + * After we have registered an async task for a client, spawn another task to receive websocket data. + * Types of websocket data: + * - Server commands + * - Client heartbeat + * - Client pulse + */ + let _recv_task = tokio::spawn(async move { while let Some(Ok(message)) = receiver.next().await { - let control = parse_message(&message, &addr, &endpoint_db); + // Parse the websocket data + let control = parse_message(&message, &addr, &storage_path).await; + // If the client disconnects from us, we need to remove it from our tracker; we can no longer send commands from the server if control.is_break() { + state.command.write().await.remove(&socket_message); break; } - if let Continue(id) = control { - let jobs_result = get_jobs(&id, &job_db); + if let Continue((socket_data, source)) = control { + if source == MessageSource::None { + continue; + } + + // If the source is the Server then the socket_data contains a command to be sent to the client + if source == MessageSource::Server { + let send_result = quick_jobs(&socket_data, &state.command.read().await).await; + if send_result.is_err() { + error!( + "[server] Could not issue quick job command: {:?}", + send_result.unwrap_err() + ); + } + continue; + } + + /* + * The message source should now be Client.
socket_data = endpoint_id + * The function `parse_message` already handles the heartbeat and pulse data + * + * At this point we just need the endpoint_id to check for collection jobs + */ + let endpoint_path = format!("{storage_path}/{socket_data}"); + let jobs_result = get_jobs(&endpoint_path).await; let jobs = match jobs_result { Ok(result) => result, Err(err) => { - error!("[server] Could not get jobs using ID {id}: {err:?}"); - Vec::new() + error!("[server] Could not get jobs using ID {socket_data}: {err:?}"); + HashMap::new() } }; + + // Serialize the available collection jobs, if any let serde_result = serde_json::to_string(&jobs); let serde_value = match serde_result { Ok(result) => result, Err(err) => { - error!("[server] Could not serialize jobs for {id}: {err:?}"); + error!("[server] Could not serialize jobs for {socket_data}: {err:?}"); continue; } }; - let send_result = sender.send(Message::Text(serde_value)).await; - - if send_result.is_err() { - error!( - "[server] Could not send jobs to ID {id}: {:?}", - send_result.unwrap_err() - ); + // Get the socket sender associated with the registered async task for the client. + // This was registered when the client first checked into the server + if let Some(client_send) = state.command.read().await.get(&socket_data) { + // Only send the message if we found the sender associated with the endpoint ID + let send_result = client_send.send(Message::Text(serde_value)).await; + if send_result.is_err() { + error!( + "[server] Could not send jobs to ID {socket_data}: {:?}", + send_result.unwrap_err() + ); + } } } } }); } -/// Parse `Web Socket` message -fn parse_message(message: &Message, addr: &SocketAddr, db: &Database) -> ControlFlow<(), String> { +#[derive(PartialEq, Debug)] +enum MessageSource { + Client, + Server, + None, +}
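`parse_message` (below) returns `ControlFlow` so the socket loops can distinguish "drop this connection" from "here is a parsed value". A compact illustration of that contract, reduced to a toy classifier (the function and tags here are illustrative only):

```rust
use std::ops::ControlFlow;

// Break ends the caller's receive loop; Continue carries a value back to it.
fn classify(msg: &str) -> ControlFlow<(), (String, &'static str)> {
    if msg.is_empty() {
        return ControlFlow::Break(()); // treat as a disconnect
    }
    if msg.contains("\"heartbeat\":") {
        return ControlFlow::Continue((msg.to_string(), "client"));
    }
    ControlFlow::Continue((String::new(), "none"))
}

fn main() {
    assert!(classify("").is_break());
    if let ControlFlow::Continue((_, source)) = classify(r#"{"heartbeat":true}"#) {
        assert_eq!(source, "client");
    }
}
```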
/// Parse websocket message. Currently messages are either Server messages (commands) or client messages (heartbeat, pulse) +async fn parse_message( + message: &Message, + addr: &SocketAddr, + path: &str, +) -> ControlFlow<(), (String, MessageSource)> { let ip = addr.ip().to_string(); match message { Message::Text(data) => { - if !verify_enrollment(data, &ip, db) { + if data.contains("\"targets\":") && ip == "127.0.0.1" { + let command = parse_command(data, path).await; + if command.is_err() { + error!("[server] Could not parse the server command"); + return ControlFlow::Break(()); + } + // Send the command to the targets + return ControlFlow::Continue((data.to_string(), MessageSource::Server)); + } + + if verify_enrollment(data, &ip, path).is_err() { return ControlFlow::Break(()); } - if data.contains("\"heartbeat\"") { - let id = parse_heartbeat(data, &ip); - return ControlFlow::Continue(id); - } else if data.contains("\"pulse\"") { - let id = parse_pulse(data, &ip); - return ControlFlow::Continue(id); + if data.contains("\"heartbeat\":") { + let id = parse_heartbeat(data, &ip, path).await; + return ControlFlow::Continue((id, MessageSource::Client)); + } else if data.contains("\"pulse\":") { + let id = parse_pulse(data, &ip, path).await; + return ControlFlow::Continue((id, MessageSource::Client)); } } Message::Binary(_) => { @@ -93,33 +211,34 @@ fn parse_message(message: &Message, addr: &SocketAddr, db: &Database) -> Control Message::Ping(_data) | Message::Pong(_data) => {} } - ControlFlow::Continue(String::new()) + // For unsupported messages return None + ControlFlow::Continue((String::new(), MessageSource::None)) } #[cfg(test)] mod tests { use super::parse_message; - use crate::db::tables::setup_db; use crate::socket::websocket::Message::Text; + use crate::socket::websocket::MessageSource; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::ControlFlow::Continue; use std::path::PathBuf; - #[test] - fn test_parse_message() { + #[tokio::test] + async fn test_parse_message() { let message = Text(String::from( r#"{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1
Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}}"#, )); let address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8000); let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - test_location.push("tests/test_data/endpoints.redb"); + test_location.push("tests/test_data"); let path = test_location.display().to_string(); - let db = setup_db(&path).unwrap(); - let control = parse_message(&message, &address, &db); - if let Continue(value) = control { + let control = parse_message(&message, &address, &path).await; + if let Continue((value, source)) = control { assert_eq!(value, "3482136c-3176-4272-9bd7-b79f025307d6"); + assert_eq!(source, MessageSource::Client) } } } diff --git a/server/src/uploads/mod.rs b/server/src/uploads/mod.rs new file mode 100644 index 00000000..4b15d986 --- /dev/null +++ b/server/src/uploads/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod upload; +pub(crate) mod uris; diff --git a/server/src/uploads/upload.rs b/server/src/uploads/upload.rs new file mode 100644 index 00000000..2dc9f784 --- /dev/null +++ b/server/src/uploads/upload.rs @@ -0,0 +1,199 @@ +use crate::{ + artifacts::jobs::JobInfo, + filestore::jobs::update_job, + server::ServerState, + utils::{ + filesystem::{create_dirs, write_file}, + uuid::generate_uuid, + }, +}; +use axum::{ + extract::{Multipart, State}, + http::StatusCode, +}; +use log::{error, warn}; + +/// Process uploaded data +pub(crate) async fn upload_collection( + State(state): State, + mut multipart: Multipart, +) -> Result<(), StatusCode> { + let mut endpoint_id = String::new(); + let path = state.config.endpoint_server.storage; + + while let Some(field) = multipart.next_field().await.unwrap() { + let name = field.name().unwrap_or_default().to_string(); + + if name == "endpoint-id" { + endpoint_id = field.text().await.unwrap_or_default(); + } else if name == "job-info" { + let data = field.text().await.unwrap_or_default(); + let endpoint_path = format!("{path}/{endpoint_id}"); + update_job_file(&endpoint_path, &data).await?; + } else if name == "collection" { + let filename_option = field.file_name(); + let filename = if let Some(result) = filename_option { + result.to_string() + } else { + warn!("[server] Filename not provided in upload. 
Generating a random one!"); + format!("{}.jsonl.gz", generate_uuid()) + }; + + let data = field.bytes().await.unwrap_or_default(); + let endpoint_dir = format!("{path}/{endpoint_id}"); + write_collection(&endpoint_dir, &filename, &data).await?; + } + } + Ok(()) +} + +/// Update the endpoint's jobs.json using the uploaded job-info data +async fn update_job_file(path: &str, data: &str) -> Result<(), StatusCode> { + if path.is_empty() { + error!("[server] No endpoint path provided; cannot update jobs.json"); + return Err(StatusCode::BAD_REQUEST); + } + + let job_result = serde_json::from_str(data); + let job: JobInfo = match job_result { + Ok(result) => result, + Err(err) => { + error!("[server] Cannot deserialize Job Info for Endpoint ID {path}: {err:?}"); + return Err(StatusCode::BAD_REQUEST); + } + }; + + let status = update_job(job, path).await; + if status.is_err() { + error!( + "[server] Could not update Job for {path}: {:?}", + status.unwrap_err() + ); + return Err(StatusCode::BAD_REQUEST); + } + + Ok(()) +} + +/// Write data to endpoint storage directory +async fn write_collection( + endpoint_dir: &str, + filename: &str, + data: &[u8], +) -> Result<(), StatusCode> { + // Endpoint storage directory should have been created upon enrollment, but check in case it is missing + let collections = format!("{endpoint_dir}/collections"); + let status = create_dirs(&collections).await; + if status.is_err() { + error!( + "[server] Could not create {collections} storage directory: {:?}", + status.unwrap_err() + ); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + + // Only decompress data smaller than 2GB + let max_size = 2147483648; + if data.len() < max_size { + let decom_name = filename.trim_end_matches(".gz"); + let endpoint_path = format!("{collections}/{decom_name}"); + // Write the data to endpoint directory, but decompress first + let status = write_file(data, &endpoint_path, true).await; + if status.is_err() { + error!( + "[server] Could not write data to {endpoint_path} storage directory: {:?}", + status.unwrap_err() + ); + } else { + return Ok(()); + } + + warn!("[server] Could not decompress and write data to {collections}.
Trying compressed data!"); + } + + let endpoint_path = format!("{collections}/{filename}"); + + // Write the compressed data to endpoint directory + let status = write_file(data, &endpoint_path, false).await; + if status.is_err() { + error!( + "[server] Could not write data to {endpoint_path} storage directory: {:?}", + status.unwrap_err() + ); + return Err(StatusCode::INTERNAL_SERVER_ERROR); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::artifacts::jobs::{Action, JobInfo, JobType, Status}; + use crate::uploads::upload::write_collection; + use crate::utils::filesystem::{create_dirs, write_file}; + use crate::{ + uploads::upload::update_job_file, + utils::{config::read_config, uuid::generate_uuid}, + }; + use std::collections::HashMap; + use std::path::PathBuf; + + #[tokio::test] + async fn test_update_job_file() { + let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + test_location.push("tests/test_data/server.toml"); + create_dirs("./tmp/uploads").await.unwrap(); + + let mut value = JobInfo { + id: 1, + collection: String::from("asdfasdfasdfasd=="), + created: 1000, + started: 10001, + finished: 20000, + name: String::from("processes"), + status: Status::NotStarted, + duration: 0, + start_time: 0, + action: Action::Start, + job_type: JobType::Collection, + }; + + let mut jobs = HashMap::new(); + jobs.insert(1, value.clone()); + + write_file( + &serde_json::to_vec(&jobs).unwrap(), + "./tmp/uploads/jobs.json", + false, + ) + .await + .unwrap(); + + value.status = Status::Failed; + + update_job_file("./tmp/uploads", &serde_json::to_string(&value).unwrap()) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_write_collection() { + let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + test_location.push("tests/test_data/server.toml"); + + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); + let endpoint_id = generate_uuid(); + + let path = format!("{}/{endpoint_id}", config.endpoint_server.storage); + let filename = "test.jsonl.gz"; + let data = [ + 31, 139, 8, 0, 89, 135, 7, 101, 0, 255, 5, 128, 177, 9, 0, 32, 16, 3, 87, 209, 27, 195, + 121, 20, 44, 2, 129, 111, 190, 16, 119, 15, 143, 123, 36, 179, 6, 237, 210, 158, 252, + 0, 132, 255, 53, 22, 19, 0, 0, 0, + ]; + + write_collection(&path, filename, &data).await.unwrap(); + } +} diff --git a/server/src/uploads/uris.rs b/server/src/uploads/uris.rs new file mode 100644 index 00000000..77809490 --- /dev/null +++ b/server/src/uploads/uris.rs @@ -0,0 +1,53 @@ +use super::upload::upload_collection; +use crate::server::ServerState; +use axum::{routing::post, Router}; + +/// Setup upload routes +pub(crate) fn upload_routes(base: &str) -> Router { + Router::new().route(&format!("{base}/upload"), post(upload_collection)) +} + +#[cfg(test)] +mod tests { + use crate::{server::ServerState, uploads::uris::upload_routes, utils::config::read_config}; + use axum::{ + body::Body, + http::{Method, Request, StatusCode}, + }; + use std::{collections::HashMap, path::PathBuf, sync::Arc}; + use tokio::sync::RwLock; + use tower::util::ServiceExt; + + #[tokio::test] + async fn test_upload_routes() { + let base = "/endpoint/v1"; + let route = upload_routes(base); + + let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + test_location.push("tests/test_data/server.toml"); + + let config = read_config(&test_location.display().to_string()) + .await + .unwrap(); + + let command = Arc::new(RwLock::new(HashMap::new())); + let server_state = ServerState { config, command }; +
let res = route + .with_state(server_state) + .oneshot( + Request::builder() + .method(Method::POST) + .uri(format!("{base}/upload")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::BAD_REQUEST); + + let body = hyper::body::to_bytes(res.into_body()).await.unwrap(); + assert_eq!(body, "Invalid `boundary` for `multipart/form-data` request") + } +} diff --git a/server/src/utils/config.rs b/server/src/utils/config.rs index f56d313b..798b349a 100644 --- a/server/src/utils/config.rs +++ b/server/src/utils/config.rs @@ -50,8 +50,11 @@ pub(crate) fn generate_config() -> ArtemisConfig { } /// Compare and verify enrollment key against server TOML config -pub(crate) fn verify_enroll_key(key: &str, config_path: &str) -> Result { - let config = read_config(config_path)?; +pub(crate) async fn verify_enroll_key( + key: &str, + config_path: &str, +) -> Result { + let config = read_config(config_path).await?; if key != config.enroll_key { return Ok(false); @@ -60,14 +63,15 @@ pub(crate) fn verify_enroll_key(key: &str, config_path: &str) -> Result Result { - let config = read_config(config_path)?; +/// Return only the storage path from the server config +pub(crate) async fn storage_path(config_path: &str) -> Result { + let config = read_config(config_path).await?; Ok(config.endpoint_server.storage) } /// Read the server TOML config file -pub(crate) fn read_config(path: &str) -> Result { - let buffer = read_file(path)?; +pub(crate) async fn read_config(path: &str) -> Result { + let buffer = read_file(path).await?; let config_result = toml::from_str(from_utf8(&buffer).unwrap_or_default()); let config = match config_result { @@ -84,7 +88,7 @@ pub(crate) fn read_config(path: &str) -> Result #[cfg(test)] mod tests { use super::generate_config; - use crate::utils::config::{read_config, verify_enroll_key}; + use crate::utils::config::{read_config, storage_path, verify_enroll_key}; use std::path::PathBuf; #[test] @@ -93,22 +97,37 @@ mod tests { assert!(!result.metadata.name.is_empty()); } - #[test] - fn test_read_config() { + #[tokio::test] + async fn test_read_config() { let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); - let result = read_config(&test_location.display().to_string()).unwrap(); + let result = read_config(&test_location.display().to_string()) + .await + .unwrap(); assert_eq!(result.enroll_key, "arandomkey"); assert_eq!(result.endpoint_server.address, "127.0.0.1") } - #[test] - fn test_verify_enroll_key() { + #[tokio::test] + async fn test_verify_enroll_key() { let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); - let result = verify_enroll_key("arandomkey", &test_location.display().to_string()).unwrap(); + let result = verify_enroll_key("arandomkey", &test_location.display().to_string()) + .await + .unwrap(); assert!(result); } + + #[tokio::test] + async fn test_storage_path() { + let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + test_location.push("tests/test_data/server.toml"); + + let result = storage_path(&test_location.display().to_string()) + .await + .unwrap(); + assert_eq!(result, "./tmp"); + } } diff --git a/server/src/utils/error.rs b/server/src/utils/error.rs index 5306e364..27fad103 100644 --- a/server/src/utils/error.rs +++ b/server/src/utils/error.rs @@ -7,6 +7,7 @@ pub enum UtilServerError { NotFile, ReadFile, CreateDirectory, + GzipDecompress, } impl fmt::Display for 
UtilServerError { @@ -16,7 +17,8 @@ impl fmt::Display for UtilServerError { UtilServerError::BadToml => write!(f, "Failed to parse TOML data"), UtilServerError::NotFile => write!(f, "Not a file"), UtilServerError::ReadFile => write!(f, "Could not read file"), - UtilServerError::CreateDirectory => write!(f, "Could create directory"), + UtilServerError::CreateDirectory => write!(f, "Could not create directory"), + UtilServerError::GzipDecompress => write!(f, "Could not decompress data"), } } } diff --git a/server/src/utils/filesystem.rs b/server/src/utils/filesystem.rs index 6d5b184d..01724ee3 100644 --- a/server/src/utils/filesystem.rs +++ b/server/src/utils/filesystem.rs @@ -1,9 +1,10 @@ use crate::utils::error::UtilServerError; -use log::error; -use std::{ - fs::{create_dir_all, read}, - path::Path, -}; +use flate2::bufread; +use log::{error, info}; +use std::path::Path; +use tokio::fs::{create_dir_all, read, OpenOptions}; +use tokio::io::Error; +use tokio::{fs::File, io::AsyncWriteExt}; /// Check if path is a file pub(crate) fn is_file(path: &str) -> bool { @@ -14,14 +15,33 @@ pub(crate) fn is_file(path: &str) -> bool { false } +/// Get size of a file +pub(crate) fn file_size(path: &str) -> u64 { + let file = Path::new(path); + if let Ok(value) = file.symlink_metadata() { + return value.len(); + } + + 0 +} + +/// Check if path is a directory +pub(crate) fn is_directory(path: &str) -> bool { + let file = Path::new(path); + if file.is_dir() { + return true; + } + false +} + /// Read a file into memory -pub(crate) fn read_file(path: &str) -> Result, UtilServerError> { +pub(crate) async fn read_file(path: &str) -> Result, UtilServerError> { // Verify provided path is a file if !is_file(path) { return Err(UtilServerError::NotFile); } - let read_result = read(path); + let read_result = read(path).await; match read_result { Ok(result) => Ok(result), Err(err) => { @@ -31,9 +51,46 @@ pub(crate) fn read_file(path: &str) -> Result, UtilServerError> { } } +/// Write data to a file asynchronously. Supports gzip decompression +pub(crate) async fn write_file(data: &[u8], path: &str, decompress: bool) -> Result<(), Error> { + // Decompression is synchronous + if decompress { + use std::{fs::File, io::copy}; + + let mut file = File::create(path)?; + let mut data = bufread::GzDecoder::new(data); + copy(&mut data, &mut file)?; + return Ok(()); + } + + let mut file = File::create(path).await?; + + file.write_all(data).await?; + + info!("[server] Wrote {} bytes to {path}", data.len()); + + Ok(()) +}
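`write_file` decompresses inline with `flate2`, and `write_collection` (earlier in this patch) tries the decompressed write first before falling back to storing the raw gzip bytes. A standalone sketch of that try-then-fallback idea; the function name and policy here are illustrative, not from the patch:

```rust
use flate2::bufread::GzDecoder;
use std::io::Read;

// Try to gunzip the payload; if it is not valid gzip, keep the raw bytes.
fn decode_or_raw(data: &[u8]) -> Vec<u8> {
    let mut decoder = GzDecoder::new(data);
    let mut out = Vec::new();
    match decoder.read_to_end(&mut out) {
        Ok(_) => out,          // valid gzip: store the decompressed form
        Err(_) => data.to_vec(), // bad header or corrupt stream: keep as-is
    }
}

fn main() {
    // Plain bytes are not gzip, so they pass through unchanged.
    assert_eq!(decode_or_raw(b"plain"), b"plain");
}
```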
/// Append a line to a file. Automatically adds a newline +pub(crate) async fn append_file(data: &str, path: &str, limit: &u64) -> Result<(), Error> { + let size = file_size(path); + // Append while under the size limit; once it is exceeded, truncate and start over + let append_file = &size < limit; + + let mut file = OpenOptions::new() + .create(true) + .write(true) + .append(append_file) + .truncate(!append_file) + .open(path) + .await?; + + file.write_all(format!("{data}\n").as_bytes()).await?; + + Ok(()) +} + /// Create a directory and all its parents -pub(crate) fn create_dirs(path: &str) -> Result<(), UtilServerError> { - let result = create_dir_all(path); +pub(crate) async fn create_dirs(path: &str) -> Result<(), UtilServerError> { + let result = create_dir_all(path).await; if result.is_err() { error!( "[server] Failed to create directory {path}: {:?}", @@ -48,19 +105,48 @@ pub(crate) fn create_dirs(path: &str) -> Result<(), UtilServerError> { #[cfg(test)] mod tests { use super::read_file; - use crate::utils::filesystem::{create_dirs, is_file}; + use crate::utils::filesystem::{append_file, create_dirs, is_directory, is_file, write_file}; use std::path::PathBuf; - #[test] - fn test_read_file() { + #[tokio::test] + async fn test_read_file() { let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_location.push("tests/test_data/server.toml"); let config_path = test_location.display().to_string(); - let results = read_file(&config_path).unwrap(); + let results = read_file(&config_path).await.unwrap(); assert!(!results.is_empty()); } + #[tokio::test] + async fn test_write_file() { + create_dirs("./tmp").await.unwrap(); + + let test = b"hello world!"; + write_file(test, "./tmp/test", false).await.unwrap(); + } + + #[tokio::test] + async fn test_append_file() { + create_dirs("./tmp").await.unwrap(); + + let test = "hello world!"; + append_file(test, "./tmp/test", &1048576).await.unwrap(); + } + + #[tokio::test] + async fn test_write_file_decompress() { + let data = [ + 31, 139, 8, 0, 215, 132, 7, 101, 0, 255, 5, 128, 65, 9, 0, 0, 8, 3, 171, 104, 55, 5, + 31, 7, 131, 125, 172, 63, 110, 65, 245, 50, 211, 1, 109, 194, 180, 3, 12, 0, 0, 0, + ]; + create_dirs("./tmp").await.unwrap(); + + let path = "./tmp/data.txt"; + + write_file(&data, &path, true).await.unwrap(); + } + #[test] fn test_is_file() { let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); @@ -72,7 +158,17 @@ mod tests { } #[test] - fn test_create_dirs() { - create_dirs(&"./tmp/atest").unwrap(); + fn test_is_directory() { + let mut test_location = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + test_location.push("tests/test_data"); + let config_path = test_location.display().to_string(); + let results = is_directory(&config_path); + + assert!(results); + } + + #[tokio::test] + async fn test_create_dirs() { + create_dirs(&"./tmp/atest").await.unwrap(); + } } diff --git a/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/enroll.json b/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/enroll.json new file mode 100644 index 00000000..c09064b8 --- /dev/null +++ b/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/enroll.json @@ -0,0 +1,8 @@ +{ + "hostname": "hello", + "platform": "linux", + "tags": [], + "notes": [], + "checkin": 1695086319, + "id": "3482136c-3176-4272-9bd7-b79f025307d6" +} \ No newline at end of file diff --git a/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/heartbeat.jsonl b/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/heartbeat.jsonl new file mode 100644 index 00000000..c8c9b905 --- /dev/null +++ b/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/heartbeat.jsonl @@ -0,0 +1,10 @@
+{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 
,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 
Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple 
M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 
Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 
,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} +{"endpoint_id":"3482136c-3176-4272-9bd7-b79f025307d6","hostname":"hello","platform":"Darwin","boot_time":0,"os_version":"12.0","uptime":110,"kernel_version":"12.1","heartbeat":true,"timestamp":1111111,"jobs_running":0,"cpu":[{"frequency":0,"cpu_usage":25.70003890991211,"name":"1","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":25.076454162597656,"name":"2","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":8.922499656677246,"name":"3","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":6.125399112701416,"name":"4","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":4.081260681152344,"name":"5","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":3.075578451156616,"name":"6","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":2.0113024711608887,"name":"7","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.5097296237945557,"name":"8","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.288386583328247,"name":"9","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10},{"frequency":0,"cpu_usage":1.1674108505249023,"name":"10","vendor_id":"Apple","brand":"Apple M1 Max","physical_core_count":10}],"disks":[{"disk_type":"SSD","file_system":"97112102115","mount_point":"/","total_space":494384795648 ,"available_space":295755320592 ,"removable":false},{"disk_type":"SSD","file_system":"97112102115","mount_point":"/System/Volumes/Data","total_space":494384795648 ,"available_space":295755320592 ,"removable":false}],"memory":{"available_memory":20146110464 ,"free_memory":6238076928 ,"free_swap":0,"total_memory":34359738368 ,"total_swap":0,"used_memory":18717523968 ,"used_swap":0}} diff --git a/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/jobs.json b/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/jobs.json new file mode 100644 index 00000000..a210b730 --- /dev/null +++ b/server/tests/test_data/3482136c-3176-4272-9bd7-b79f025307d6/jobs.json @@ -0,0 +1,15 @@ +{ + "1": { + "id": 1, + "name": "randomjob", + "created": 10, + "started": 0, + "finished": 0, + "status": "NotStarted", + "duration": 0, + "start_time": 0, + "action": "Start", + "job_type": "Collection", + "collection": "c3lzdGVtID0gIndpbmRvd3MiCgpbb3V0cHV0XQpuYW1lID0gInByZWZldGNoX2NvbGxlY3Rpb24iCmRpcmVjdG9yeSA9ICIuL3RtcCIKZm9ybWF0ID0gImpzb24iCmNvbXByZXNzID0gZmFsc2UKZW5kcG9pbnRfaWQgPSAiNmM1MWIxMjMtMTUyMi00NTcyLTlmMmEtMGJkNWFiZDgxYjgyIgpjb2xsZWN0aW9uX2lkID0gMQpvdXRwdXQgPSAibG9jYWwiCgpbW2FydGlmYWN0c11dCmFydGlmYWN0X25hbWUgPSAicHJlZmV0Y2giClthcnRpZmFjdHMucHJlZmV0Y2hdCmFsdF9kcml2ZSA9ICdDJwo=" + } +} \ No newline at end of file diff --git a/server/tests/test_data/jobs.redb b/server/tests/test_data/jobs.redb index e6458a7173901087bd9c66c26ef3b09124d883ce..8369b8408405f930b072a30274d073ad1946cb6a 100644 GIT binary patch delta 847 zcmZo@aB65soFK!-$jHFJz&KG+ZlVK+1T&Bg0twRjC+y})+LZ55X>>|UtDmSSA;|)f zVko)w#WjA3-#gn`=Q4wj|NhQ0QBmGNer?~}mD6YFYE=7W&F~1g3p5pEE(04>)1mY2 z#^;2Yr-%ek+PwWKQ!L0pcBq`Y`sqzN^O~itA8i+2`}C(a8_+<3-E;D_4*S{K*qmLc zddls^vF(P+EC=`|3Ggs6GEU|b_{8{cGov6g2jlx@Mak`ol8lf4PZkiAna(1~B%%24 zKLj)YEr7A1LjM^QfMP(c|Cw1C7&w4TMalpFr#r|pi)@#WW8q<%o*>W6u~}8}Pd!MG zb9?T5=ISbfCjJGRINd>!g=4#b9E$=g*n2$w$I 
diff --git a/server/tests/test_data/jobs.redb b/server/tests/test_data/jobs.redb
index e6458a7173901087bd9c66c26ef3b09124d883ce..8369b8408405f930b072a30274d073ad1946cb6a 100644
GIT binary patch
delta 847
[base85-encoded binary delta payload omitted; not human-readable]