From 4b62c8968f2307e4b2d4e37fd6182c3d6cd9c83c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Erik=20Bj=C3=A4reholt?=
Date: Sat, 11 May 2024 13:42:14 +0200
Subject: [PATCH] fix: fixed compilation warnings & deprecations

---
 aw-client-rust/src/blocking.rs    |  1 -
 aw-client-rust/src/lib.rs         |  1 -
 aw-datastore/src/datastore.rs     | 22 +++-------------
 aw-query/src/datatype.rs          |  1 -
 aw-query/src/functions.rs         |  3 ---
 aw-server/src/endpoints/bucket.rs | 42 +++++++++++++++---------------
 aw-sync/src/sync.rs               |  4 +++
 aw-sync/src/sync_wrapper.rs       |  1 -
 aw-sync/src/util.rs               |  1 -
 9 files changed, 29 insertions(+), 47 deletions(-)

diff --git a/aw-client-rust/src/blocking.rs b/aw-client-rust/src/blocking.rs
index 7d6b6a97..4e765fac 100644
--- a/aw-client-rust/src/blocking.rs
+++ b/aw-client-rust/src/blocking.rs
@@ -1,5 +1,4 @@
 use std::future::Future;
-use std::vec::Vec;
 use std::{collections::HashMap, error::Error};
 
 use chrono::{DateTime, Utc};
diff --git a/aw-client-rust/src/lib.rs b/aw-client-rust/src/lib.rs
index ed00606d..624bad8d 100644
--- a/aw-client-rust/src/lib.rs
+++ b/aw-client-rust/src/lib.rs
@@ -7,7 +7,6 @@ extern crate tokio;
 
 pub mod blocking;
 
-use std::vec::Vec;
 use std::{collections::HashMap, error::Error};
 
 use chrono::{DateTime, Utc};
diff --git a/aw-datastore/src/datastore.rs b/aw-datastore/src/datastore.rs
index 3ace2278..c33a3021 100644
--- a/aw-datastore/src/datastore.rs
+++ b/aw-datastore/src/datastore.rs
@@ -2,8 +2,6 @@ use std::collections::HashMap;
 
 use chrono::DateTime;
 use chrono::Duration;
-use chrono::NaiveDateTime;
-use chrono::TimeZone;
 use chrono::Utc;
 
 use rusqlite::Connection;
@@ -233,10 +231,7 @@ impl DatastoreInstance {
                 Some(starttime_ns) => {
                     let seconds: i64 = starttime_ns / 1_000_000_000;
                     let subnanos: u32 = (starttime_ns % 1_000_000_000) as u32;
-                    Some(TimeZone::from_utc_datetime(
-                        &Utc,
-                        &NaiveDateTime::from_timestamp_opt(seconds, subnanos).unwrap(),
-                    ))
+                    Some(DateTime::from_timestamp(seconds, subnanos).unwrap())
                 }
                 None => None,
             };
@@ -246,10 +241,7 @@ impl DatastoreInstance {
                 Some(endtime_ns) => {
                     let seconds: i64 = endtime_ns / 1_000_000_000;
                     let subnanos: u32 = (endtime_ns % 1_000_000_000) as u32;
-                    Some(TimeZone::from_utc_datetime(
-                        &Utc,
-                        &NaiveDateTime::from_timestamp_opt(seconds, subnanos).unwrap(),
-                    ))
+                    Some(DateTime::from_timestamp(seconds, subnanos).unwrap())
                 }
                 None => None,
             };
@@ -689,10 +681,7 @@ impl DatastoreInstance {
 
         Ok(Event {
             id: Some(id),
-            timestamp: TimeZone::from_utc_datetime(
-                &Utc,
-                &NaiveDateTime::from_timestamp_opt(time_seconds, time_subnanos).unwrap(),
-            ),
+            timestamp: DateTime::from_timestamp(time_seconds, time_subnanos).unwrap(),
             duration: Duration::nanoseconds(duration_ns),
             data,
         })
@@ -784,10 +773,7 @@ impl DatastoreInstance {
 
         Ok(Event {
             id: Some(id),
-            timestamp: TimeZone::from_utc_datetime(
-                &Utc,
-                &NaiveDateTime::from_timestamp_opt(time_seconds, time_subnanos).unwrap(),
-            ),
+            timestamp: DateTime::from_timestamp(time_seconds, time_subnanos).unwrap(),
             duration: Duration::nanoseconds(duration_ns),
             data,
         })
diff --git a/aw-query/src/datatype.rs b/aw-query/src/datatype.rs
index a1162197..c31355dd 100644
--- a/aw-query/src/datatype.rs
+++ b/aw-query/src/datatype.rs
@@ -1,5 +1,4 @@
 use std::collections::HashMap;
-use std::convert::{TryFrom, TryInto};
 use std::fmt;
 
 use super::functions;
diff --git a/aw-query/src/functions.rs b/aw-query/src/functions.rs
index c060b9a6..4a52c4a2 100644
--- a/aw-query/src/functions.rs
+++ b/aw-query/src/functions.rs
@@ -116,9 +116,6 @@ pub fn fill_env(env: &mut VarEnv) {
 }
 
 mod qfunctions {
-    use std::convert::TryFrom;
-    use std::convert::TryInto;
-
     use aw_datastore::Datastore;
     use aw_models::Event;
     use aw_transform::classify::Rule;
diff --git a/aw-server/src/endpoints/bucket.rs b/aw-server/src/endpoints/bucket.rs
index a3ed96e2..b53d12af 100644
--- a/aw-server/src/endpoints/bucket.rs
+++ b/aw-server/src/endpoints/bucket.rs
@@ -30,7 +30,7 @@ pub fn buckets_get(
 
 #[get("/<bucket_id>")]
 pub fn bucket_get(
-    bucket_id: String,
+    bucket_id: &str,
     state: &State<ServerState>,
 ) -> Result<Json<Bucket>, HttpErrorJson> {
     let datastore = endpoints_get_lock!(state.datastore);
@@ -46,13 +46,13 @@ pub fn bucket_get(
 /// This is useful for watchers which are known/assumed to run locally but might not know their hostname (like aw-watcher-web).
 #[post("/<bucket_id>", data = "<message>", format = "application/json")]
 pub fn bucket_new(
-    bucket_id: String,
+    bucket_id: &str,
     message: Json<Bucket>,
     state: &State<ServerState>,
 ) -> Result<(), HttpErrorJson> {
     let mut bucket = message.into_inner();
     if bucket.id != bucket_id {
-        bucket.id = bucket_id;
+        bucket.id = bucket_id.to_string();
     }
     if bucket.hostname == "!local" {
         bucket.hostname = gethostname()
@@ -72,7 +72,7 @@ #[get("/<bucket_id>/events?<start>&<end>&<limit>")]
 pub fn bucket_events_get(
-    bucket_id: String,
+    bucket_id: &str,
     start: Option<String>,
     end: Option<String>,
     limit: Option<u64>,
     state: &State<ServerState>,
 ) -> Result<Json<Vec<Event>>, HttpErrorJson> {
@@ -104,7 +104,7 @@
         None => None,
     };
     let datastore = endpoints_get_lock!(state.datastore);
-    let res = datastore.get_events(&bucket_id, starttime, endtime, limit);
+    let res = datastore.get_events(bucket_id, starttime, endtime, limit);
     match res {
         Ok(events) => Ok(Json(events)),
         Err(err) => Err(err.into()),
@@ -115,13 +115,13 @@
 // See: https://api.rocket.rs/master/rocket/struct.Route.html#resolving-collisions
 #[get("/<bucket_id>/events/<event_id>?<_unused..>")]
 pub fn bucket_events_get_single(
-    bucket_id: String,
+    bucket_id: &str,
     event_id: i64,
     _unused: Option,
     state: &State<ServerState>,
 ) -> Result<Json<Event>, HttpErrorJson> {
     let datastore = endpoints_get_lock!(state.datastore);
-    let res = datastore.get_event(&bucket_id, event_id);
+    let res = datastore.get_event(bucket_id, event_id);
     match res {
         Ok(events) => Ok(Json(events)),
         Err(err) => Err(err.into()),
@@ -130,12 +130,12 @@
 
 #[post("/<bucket_id>/events", data = "<events>", format = "application/json")]
 pub fn bucket_events_create(
-    bucket_id: String,
+    bucket_id: &str,
     events: Json<Vec<Event>>,
     state: &State<ServerState>,
 ) -> Result<Json<Vec<Event>>, HttpErrorJson> {
     let datastore = endpoints_get_lock!(state.datastore);
-    let res = datastore.insert_events(&bucket_id, &events);
+    let res = datastore.insert_events(bucket_id, &events);
     match res {
         Ok(events) => Ok(Json(events)),
         Err(err) => Err(err.into()),
@@ -148,14 +148,14 @@
     format = "application/json"
 )]
 pub fn bucket_events_heartbeat(
-    bucket_id: String,
+    bucket_id: &str,
     heartbeat_json: Json<Event>,
     pulsetime: f64,
     state: &State<ServerState>,
 ) -> Result<Json<Event>, HttpErrorJson> {
     let heartbeat = heartbeat_json.into_inner();
     let datastore = endpoints_get_lock!(state.datastore);
-    match datastore.heartbeat(&bucket_id, heartbeat, pulsetime) {
+    match datastore.heartbeat(bucket_id, heartbeat, pulsetime) {
         Ok(e) => Ok(Json(e)),
         Err(err) => Err(err.into()),
     }
@@ -163,11 +163,11 @@
 
 #[get("/<bucket_id>/events/count")]
 pub fn bucket_event_count(
-    bucket_id: String,
+    bucket_id: &str,
     state: &State<ServerState>,
 ) -> Result<Json<u64>, HttpErrorJson> {
     let datastore = endpoints_get_lock!(state.datastore);
-    let res = datastore.get_event_count(&bucket_id, None, None);
+    let res = datastore.get_event_count(bucket_id, None, None);
     match res {
         Ok(eventcount) => Ok(Json(eventcount as u64)),
         Err(err) => Err(err.into()),
@@ -176,12 +176,12 @@
 
 #[delete("/<bucket_id>/events/<event_id>")]
 pub fn bucket_events_delete_by_id(
-    bucket_id: String,
+    bucket_id: &str,
     event_id: i64,
     state: &State<ServerState>,
 ) -> Result<(), HttpErrorJson> {
     let datastore = endpoints_get_lock!(state.datastore);
-    match datastore.delete_events_by_id(&bucket_id, vec![event_id]) {
+    match datastore.delete_events_by_id(bucket_id, vec![event_id]) {
         Ok(_) => Ok(()),
         Err(err) => Err(err.into()),
     }
@@ -189,31 +189,31 @@
 
 #[get("/<bucket_id>/export")]
 pub fn bucket_export(
-    bucket_id: String,
+    bucket_id: &str,
     state: &State<ServerState>,
 ) -> Result {
     let datastore = endpoints_get_lock!(state.datastore);
     let mut export = BucketsExport {
         buckets: HashMap::new(),
     };
-    let mut bucket = match datastore.get_bucket(&bucket_id) {
+    let mut bucket = match datastore.get_bucket(bucket_id) {
         Ok(bucket) => bucket,
         Err(err) => return Err(err.into()),
     };
 
     /* TODO: Replace expect with http error */
     let events = datastore
-        .get_events(&bucket_id, None, None, None)
+        .get_events(bucket_id, None, None, None)
         .expect("Failed to get events for bucket");
     bucket.events = Some(TryVec::new(events));
-    export.buckets.insert(bucket_id, bucket);
+    export.buckets.insert(bucket_id.into(), bucket);
     Ok(export.into())
 }
 
 #[delete("/<bucket_id>")]
-pub fn bucket_delete(bucket_id: String, state: &State<ServerState>) -> Result<(), HttpErrorJson> {
+pub fn bucket_delete(bucket_id: &str, state: &State<ServerState>) -> Result<(), HttpErrorJson> {
     let datastore = endpoints_get_lock!(state.datastore);
-    match datastore.delete_bucket(&bucket_id) {
+    match datastore.delete_bucket(bucket_id) {
         Ok(_) => Ok(()),
         Err(err) => Err(err.into()),
     }
diff --git a/aw-sync/src/sync.rs b/aw-sync/src/sync.rs
index 405118a9..228dc471 100644
--- a/aw-sync/src/sync.rs
+++ b/aw-sync/src/sync.rs
@@ -282,6 +282,10 @@ pub fn sync_datastores(
         let bucket_to = get_or_create_sync_bucket(&bucket_from, ds_to, is_push);
         sync_one(ds_from, ds_to, bucket_from, bucket_to);
     }
+
+    // Close
+    ds_from.close();
+    ds_to.close();
 }
 
 /// Syncs a single bucket from one datastore to another
diff --git a/aw-sync/src/sync_wrapper.rs b/aw-sync/src/sync_wrapper.rs
index 4e786e4f..b83f90bf 100644
--- a/aw-sync/src/sync_wrapper.rs
+++ b/aw-sync/src/sync_wrapper.rs
@@ -1,4 +1,3 @@
-use std::boxed::Box;
 use std::error::Error;
 use std::fs;
 use std::net::TcpStream;
diff --git a/aw-sync/src/util.rs b/aw-sync/src/util.rs
index dde96555..05ad88d3 100644
--- a/aw-sync/src/util.rs
+++ b/aw-sync/src/util.rs
@@ -1,4 +1,3 @@
-use std::boxed::Box;
 use std::error::Error;
 use std::ffi::OsStr;
 use std::fs;
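
Note on the chrono change in aw-datastore above: the patch replaces the deprecated two-step construction via NaiveDateTime::from_timestamp_opt plus TimeZone::from_utc_datetime with DateTime::from_timestamp, which chrono provides since 0.4.31 and which returns Option<DateTime<Utc>> directly. Below is a minimal, self-contained sketch of that migration, assuming a chrono version with DateTime::from_timestamp (0.4.31 or newer); the datetime_from_ns helper name and the sample value are illustrative and not part of the patch.

use chrono::{DateTime, Utc};

// Hypothetical helper mirroring the nanosecond-timestamp conversion done in aw-datastore.
fn datetime_from_ns(ns: i64) -> DateTime<Utc> {
    let seconds: i64 = ns / 1_000_000_000;
    let subnanos: u32 = (ns % 1_000_000_000) as u32;

    // Before (now deprecated in chrono):
    //   use chrono::{NaiveDateTime, TimeZone};
    //   TimeZone::from_utc_datetime(
    //       &Utc,
    //       &NaiveDateTime::from_timestamp_opt(seconds, subnanos).unwrap(),
    //   )

    // After: build the UTC datetime in one call; None is only returned for
    // out-of-range seconds or an invalid nanosecond component.
    DateTime::from_timestamp(seconds, subnanos).unwrap()
}

fn main() {
    println!("{}", datetime_from_ns(1_715_427_734_000_000_000).to_rfc3339());
}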