From b18f84a7b1c0bebcc8611111ecfe8d085636e51e Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Tue, 14 Nov 2023 15:46:22 +0100 Subject: [PATCH 01/25] feat(data-formats): move StoredItem from rendezvous The `StoredItem` struct is no longer needed just in the rendezvous server, we are making it public and moving it to the data-formats lib so that it can be easily included in other modules. Signed-off-by: Irene Diez --- data-formats/src/lib.rs | 1 + data-formats/src/serializable.rs | 35 ++++++++++++++++++++++ rendezvous-server/src/handlers_to0.rs | 3 +- rendezvous-server/src/main.rs | 42 +-------------------------- 4 files changed, 38 insertions(+), 43 deletions(-) diff --git a/data-formats/src/lib.rs b/data-formats/src/lib.rs index c11495fcf..ea34168fd 100644 --- a/data-formats/src/lib.rs +++ b/data-formats/src/lib.rs @@ -22,6 +22,7 @@ pub mod cborparser; mod serializable; pub use serializable::DeserializableMany; pub use serializable::Serializable; +pub use serializable::StoredItem; pub fn interoperable_kdf_available() -> bool { #[cfg(feature = "use_noninteroperable_kdf")] diff --git a/data-formats/src/serializable.rs b/data-formats/src/serializable.rs index 89ad9fbfd..5e9375163 100644 --- a/data-formats/src/serializable.rs +++ b/data-formats/src/serializable.rs @@ -1,3 +1,6 @@ +use crate::cborparser::{ParsedArray, ParsedArrayBuilder, ParsedArraySize2}; +use crate::publickey::PublicKey; +use crate::types::COSESign; use crate::Error; pub trait Serializable { @@ -85,3 +88,35 @@ where ciborium::ser::into_writer(self, &mut writer).map_err(Error::from) } } + +#[derive(Clone, Debug)] +pub struct StoredItem { + pub public_key: PublicKey, + pub to1d: COSESign, +} + +impl Serializable for StoredItem { + fn deserialize_from_reader(reader: R) -> Result + where + R: std::io::Read, + { + let contents: ParsedArray = ParsedArray::deserialize_from_reader(reader)?; + + let public_key = contents.get(0)?; + let to1d = contents.get(1)?; + + Ok(StoredItem { public_key, to1d }) + } + + fn 
serialize_to_writer(&self, writer: W) -> Result<(), Error> + where + W: std::io::Write, + { + let mut contents: ParsedArrayBuilder = ParsedArrayBuilder::new(); + contents.set(0, &self.public_key)?; + contents.set(1, &self.to1d)?; + let contents = contents.build(); + + contents.serialize_to_writer(writer) + } +} diff --git a/rendezvous-server/src/handlers_to0.rs b/rendezvous-server/src/handlers_to0.rs index c6a1adb00..d2d32f9b2 100644 --- a/rendezvous-server/src/handlers_to0.rs +++ b/rendezvous-server/src/handlers_to0.rs @@ -5,13 +5,12 @@ use fdo_data_formats::{ constants::ErrorCode, messages::Message, types::{Nonce, TO1DataPayload}, + StoredItem, }; use fdo_http_wrapper::server::Error; use fdo_http_wrapper::server::RequestInformation; -use super::StoredItem; - pub(super) async fn hello( _user_data: super::RendezvousUDT, mut ses_with_store: RequestInformation, diff --git a/rendezvous-server/src/main.rs b/rendezvous-server/src/main.rs index 37f0f9edf..8721dd7c4 100644 --- a/rendezvous-server/src/main.rs +++ b/rendezvous-server/src/main.rs @@ -5,53 +5,13 @@ use openssl::x509::X509; use tokio::signal::unix::{signal, SignalKind}; use warp::Filter; -use fdo_data_formats::{ - cborparser::{ParsedArray, ParsedArrayBuilder}, - enhanced_types::X5Bag, - publickey::PublicKey, - types::{COSESign, Guid}, - ProtocolVersion, Serializable, -}; +use fdo_data_formats::{enhanced_types::X5Bag, types::Guid, ProtocolVersion, StoredItem}; use fdo_store::Store; use fdo_util::servers::{configuration::rendezvous_server::RendezvousServerSettings, settings_for}; mod handlers_to0; mod handlers_to1; -#[derive(Clone, Debug)] -struct StoredItem { - public_key: PublicKey, - to1d: COSESign, -} - -impl Serializable for StoredItem { - fn deserialize_from_reader(reader: R) -> Result - where - R: std::io::Read, - { - let contents: ParsedArray = - ParsedArray::deserialize_from_reader(reader)?; - - let public_key = contents.get(0)?; - let to1d = contents.get(1)?; - - Ok(StoredItem { public_key, to1d }) - } 
- - fn serialize_to_writer(&self, writer: W) -> Result<(), fdo_data_formats::Error> - where - W: std::io::Write, - { - let mut contents: ParsedArrayBuilder = - ParsedArrayBuilder::new(); - contents.set(0, &self.public_key)?; - contents.set(1, &self.to1d)?; - let contents = contents.build(); - - contents.serialize_to_writer(writer) - } -} - #[derive(Debug, Clone, Copy)] #[non_exhaustive] enum RendezvousStoreMetadataKey {} From 0bdb99540d6e43f65c8f2dba7f16cf146aaa74ed Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Fri, 29 Sep 2023 17:51:13 +0200 Subject: [PATCH 02/25] feat: add DB initialization module This provides an interface defining the methods that we expect to be used for OV handling operations. Added an implementation for sqlite and postgress. Added db creation files and tests. Signed-off-by: Irene Diez --- Cargo.lock | 104 ++++ Cargo.toml | 2 + db/Cargo.toml | 22 + db/src/lib.rs | 120 ++++ db/src/models.rs | 88 +++ db/src/postgres.rs | 272 +++++++++ db/src/schema.rs | 24 + db/src/sqlite.rs | 530 ++++++++++++++++++ .../2023-10-03-152801_create_db/down.sql | 3 + .../2023-10-03-152801_create_db/up.sql | 7 + .../2023-10-03-152801_create_db/down.sql | 1 + .../2023-10-03-152801_create_db/up.sql | 5 + .../2023-10-03-152801_create_db/down.sql | 3 + .../2023-10-03-152801_create_db/up.sql | 8 + .../2023-10-03-152801_create_db/down.sql | 1 + .../2023-10-03-152801_create_db/up.sql | 6 + .../2023-10-03-152801_create_db/down.sql | 3 + .../2023-10-03-152801_create_db/up.sql | 7 + .../2023-10-03-152801_create_db/down.sql | 1 + .../2023-10-03-152801_create_db/up.sql | 5 + 20 files changed, 1212 insertions(+) create mode 100644 db/Cargo.toml create mode 100644 db/src/lib.rs create mode 100644 db/src/models.rs create mode 100644 db/src/postgres.rs create mode 100644 db/src/schema.rs create mode 100644 db/src/sqlite.rs create mode 100644 migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql create mode 100644 
migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql create mode 100644 migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql create mode 100644 migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql create mode 100644 migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql create mode 100644 migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql create mode 100644 migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql create mode 100644 migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql create mode 100644 migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql create mode 100644 migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql create mode 100644 migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql create mode 100644 migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql diff --git a/Cargo.lock b/Cargo.lock index 314a2bc79..498bef8ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -666,6 +666,43 @@ dependencies = [ "bindgen 0.68.1", ] +[[package]] +name = "diesel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d98235fdc2f355d330a8244184ab6b4b33c28679c0b4158f63138e51d6cf7e88" +dependencies = [ + "bitflags 2.4.0", + "byteorder", + "diesel_derives", + "itoa", + "libsqlite3-sys", + "pq-sys", + "r2d2", + "time", +] + +[[package]] +name = "diesel_derives" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +dependencies = [ + "diesel_table_macro_syntax", + "proc-macro2", + "quote", + "syn 2.0.18", +] + +[[package]] +name = "diesel_table_macro_syntax" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" +dependencies = [ + "syn 2.0.18", +] + [[package]] name = "diff" version = "0.1.13" @@ -697,6 +734,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "either" version = "1.8.1" @@ -872,6 +915,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "fdo-db" +version = "0.4.12" +dependencies = [ + "anyhow", + "diesel", + "fdo-data-formats", + "fdo-http-wrapper", + "openssl", +] + [[package]] name = "fdo-http-wrapper" version = "0.4.13" @@ -1638,6 +1692,16 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +[[package]] +name = "libsqlite3-sys" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +dependencies = [ + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2163,6 +2227,15 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "pq-sys" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" +dependencies = [ + "vcpkg", +] + [[package]] name = "pretty_assertions" version = "1.3.0" @@ -2213,6 +2286,17 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + [[package]] name = "rand" version = "0.8.5" @@ -2404,6 +2488,15 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "scheduled-thread-pool" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -2856,8 +2949,10 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ + "itoa", "serde", "time-core", + "time-macros", ] [[package]] @@ -2866,6 +2961,15 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +[[package]] +name = "time-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +dependencies = [ + "time-core", +] + [[package]] name = "tinyvec" version = "1.6.0" diff --git a/Cargo.toml b/Cargo.toml index 6e42d3a88..a05cbe018 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "http-wrapper", "store", "util", + "db", "client-linuxapp", "owner-onboarding-server", @@ -24,6 +25,7 @@ default-members = [ "http-wrapper", "store", "util", + "db", "client-linuxapp", "owner-onboarding-server", diff --git a/db/Cargo.toml b/db/Cargo.toml new file mode 100644 index 000000000..b3837f9da --- /dev/null +++ b/db/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "fdo-db" +version = "0.4.12" +edition = "2021" + + +[dependencies] +anyhow = "1.0" +diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"] } +dotenvy = "0.15" + +fdo-data-formats = { path = 
"../data-formats", version = "0.4.12" } + +[dev-dependencies] +fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.12", features = ["server"] } +openssl = "0.10.55" + +[features] +postgres = [] +sqlite = [] + +default = ["postgres", "sqlite"] \ No newline at end of file diff --git a/db/src/lib.rs b/db/src/lib.rs new file mode 100644 index 000000000..949210da0 --- /dev/null +++ b/db/src/lib.rs @@ -0,0 +1,120 @@ +pub mod models; +#[cfg(feature = "postgres")] +pub mod postgres; +pub mod schema; +#[cfg(feature = "sqlite")] +pub mod sqlite; + +use anyhow::Result; +use diesel::r2d2::ConnectionManager; +use diesel::r2d2::Pool; + +use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; +use fdo_data_formats::StoredItem; +use models::ManufacturerOV; +use models::OwnerOV; +use models::RendezvousOV; + +pub trait DBStoreManufacturer +where + T: diesel::r2d2::R2D2Connection + 'static, +{ + /// Gets a connection pool + fn get_conn_pool() -> Pool>; + + /// Gets a connection to the db + fn get_connection() -> T; + + /// Inserts an OV + fn insert_ov(ov: &OV, ttl: Option, conn: &mut T) -> Result<()>; + + /// Gets an OV + fn get_ov(guid: &str, conn: &mut T) -> Result; + + /// Deletes an OV + fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; + + /// Deletes all OVs whose ttl is less or equal to the given ttl + fn delete_ov_ttl_le(ttl: i64, conn: &mut T) -> Result<()>; + + /// Updates the ttl of an existing OV. + /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the ttl. 
+ fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut T) -> Result<()>; +} + +pub trait DBStoreOwner +where + T: diesel::r2d2::R2D2Connection + 'static, +{ + /// Gets a connection pool + fn get_conn_pool() -> Pool>; + + /// Gets a connection to the db + fn get_connection() -> T; + + /// Inserts an OV + fn insert_ov(ov: &OV, to2: Option, to0: Option, conn: &mut T) -> Result<()>; + + /// Gets an OV + fn get_ov(guid: &str, conn: &mut T) -> Result; + + /// Deletes an OV + fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; + + /// Selects all the OVs with the given to2_performed status + fn select_ov_to2_performed(to2_performed: bool, conn: &mut T) -> Result>; + + /// Selects all the OVs whose to0 is less than the given maximum + fn select_ov_to0_less_than(to0_max: i64, conn: &mut T) -> Result>; + + /// Selects all the OVs with the given to2_performed status and those whose + /// to0 is less that then given maximum + fn select_ov_to2_performed_and_ov_to0_less_than( + to2_performed: bool, + to0_max: i64, + conn: &mut T, + ) -> Result>; + + /// Updates the to0_accept_owner_wait_seconds field of an existing OV. + /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the value. + fn update_ov_to0_wait_seconds( + guid: &str, + wait_seconds: Option, + conn: &mut T, + ) -> Result<()>; + + /// Updates the to0 performed status of an existing OV. 
+ /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the to0_performed + fn update_ov_to2(guid: &str, to0_performed: Option, conn: &mut T) -> Result<()>; +} + +pub trait DBStoreRendezvous +where + T: diesel::r2d2::R2D2Connection + 'static, +{ + /// Gets a connection pool + fn get_conn_pool() -> Pool>; + + /// Gets a connection to the db + fn get_connection() -> T; + + /// Inserts an OV + fn insert_ov(ov: &StoredItem, guid: &str, ttl: Option, conn: &mut T) -> Result<()>; + + /// Gets an OV + fn get_ov(guid: &str, conn: &mut T) -> Result; + + /// Deletes an OV + fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; + + /// Deletes all OVs whose ttl is less or equal to the given ttl + fn delete_ov_ttl_le(ttl: i64, conn: &mut T) -> Result<()>; + + /// Updates the ttl of an existing OV. + /// Option is set as the ttl type so that we can set NULL in the + /// database if 'None' is passed as the ttl. + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut T) -> Result<()>; +} diff --git a/db/src/models.rs b/db/src/models.rs new file mode 100644 index 000000000..bdd5bcd7d --- /dev/null +++ b/db/src/models.rs @@ -0,0 +1,88 @@ +use diesel::prelude::*; +use std::fmt; + +#[derive(Queryable, Selectable, Identifiable)] +#[diesel(table_name = crate::schema::rendezvous_vouchers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(guid))] +pub struct RendezvousOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +#[derive(Insertable)] +#[diesel(table_name = crate::schema::rendezvous_vouchers)] +pub struct NewRendezvousOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +#[derive(Queryable, Selectable, Identifiable, AsChangeset)] +#[diesel(table_name = crate::schema::owner_vouchers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(guid))] +pub struct OwnerOV { + pub guid: String, + pub contents: Vec, + pub to2_performed: Option, + pub to0_accept_owner_wait_seconds: Option, +} + 
+#[derive(Insertable)] +#[diesel(table_name = crate::schema::owner_vouchers)] +pub struct NewOwnerOV { + pub guid: String, + pub contents: Vec, + pub to2_performed: Option, + pub to0_accept_owner_wait_seconds: Option, +} + +#[derive(Queryable, Selectable, Identifiable, AsChangeset)] +#[diesel(table_name = crate::schema::manufacturer_vouchers)] +#[diesel(treat_none_as_null = true)] +#[diesel(primary_key(guid))] +pub struct ManufacturerOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +#[derive(Insertable)] +#[diesel(table_name = crate::schema::manufacturer_vouchers)] +pub struct NewManufacturerOV { + pub guid: String, + pub contents: Vec, + pub ttl: Option, +} + +impl fmt::Display for RendezvousOV { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "GUID: {}, ttl: {:?}, contents: {:?}", + self.guid, self.ttl, self.contents + ) + } +} + +impl fmt::Display for OwnerOV { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "GUID: {}, to2_performed: {:?}, to0_accept_owner_wait_seconds {:?}, contents: {:?}", + self.guid, self.to2_performed, self.to0_accept_owner_wait_seconds, self.contents + ) + } +} + +impl fmt::Display for ManufacturerOV { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "GUID: {}, ttl: {:?}, contents: {:?}", + self.guid, self.ttl, self.contents + ) + } +} diff --git a/db/src/postgres.rs b/db/src/postgres.rs new file mode 100644 index 000000000..691c63f00 --- /dev/null +++ b/db/src/postgres.rs @@ -0,0 +1,272 @@ +use super::{DBStoreManufacturer, DBStoreOwner, DBStoreRendezvous}; +use crate::models::NewManufacturerOV; +use crate::schema::manufacturer_vouchers; +use crate::schema::owner_vouchers; +use crate::schema::rendezvous_vouchers; +use fdo_data_formats::StoredItem; + +use diesel::prelude::*; +use diesel::r2d2::ConnectionManager; +use diesel::r2d2::Pool; +use diesel::PgConnection; + +use std::env; + +use anyhow::Result; +use dotenvy::dotenv; + +use 
super::models::{ManufacturerOV, NewOwnerOV, NewRendezvousOV, OwnerOV, RendezvousOV}; + +use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; +use fdo_data_formats::Serializable; + +pub struct PostgresManufacturerDB {} + +impl DBStoreManufacturer for PostgresManufacturerDB { + fn get_connection() -> PgConnection { + dotenv().ok(); + let database_url = env::var("POSTGRES_MANUFACTURER_DATABASE_URL") + .expect("POSTGRES_MANUFACTURER_DATABASE_URL must be set"); + PgConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + dotenv().ok(); + let database_url = env::var("POSTGRES_MANUFACTURER_DATABASE_URL") + .expect("POSTGRES_MANUFACTURER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov(ov: &OV, ttl: Option, conn: &mut PgConnection) -> Result<()> { + let new_ov_manufacturer = NewManufacturerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data().expect("Error serializing OV"), + ttl, + }; + diesel::insert_into(super::schema::manufacturer_vouchers::table) + .values(new_ov_manufacturer) + .execute(conn) + .expect("Error saving OV"); + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .first(conn) + .expect("Error geting manufacturer OVs"); + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut PgConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut PgConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + 
.filter(super::schema::manufacturer_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut PgConnection) -> Result<()> { + diesel::update(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .set(super::schema::manufacturer_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} + +pub struct PostgresOwnerDB {} + +impl DBStoreOwner for PostgresOwnerDB { + fn get_connection() -> PgConnection { + dotenv().ok(); + let database_url = env::var("POSTGRES_OWNER_DATABASE_URL") + .expect("POSTGRES_OWNER_DATABASE_URL must be set"); + PgConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + dotenv().ok(); + let database_url = env::var("POSTGRES_OWNER_DATABASE_URL") + .expect("POSTGRES_OWNER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &OV, + to2: Option, + to0: Option, + conn: &mut PgConnection, + ) -> Result<()> { + let new_ov_owner = NewOwnerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data().expect("Error serializing OV"), + to2_performed: to2, + to0_accept_owner_wait_seconds: to0, + }; + diesel::insert_into(super::schema::owner_vouchers::table) + .values(new_ov_owner) + .execute(conn) + .expect("Error saving OV"); + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .first(conn) + .expect("Error getting owner OV"); + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut PgConnection) -> Result<()> { + diesel::delete(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + 
#[allow(non_snake_case)] + fn select_ov_to2_performed( + to2_performed: bool, + conn: &mut PgConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .select(OwnerOV::as_select()) + .load(conn) + .expect("Error getting owner OVs"); + Ok(result) + } + + #[allow(non_snake_case)] + fn select_ov_to0_less_than(to0_max: i64, conn: &mut PgConnection) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) + .select(OwnerOV::as_select()) + .load(conn) + .expect("Error getting owner OVs"); + Ok(result) + } + + fn select_ov_to2_performed_and_ov_to0_less_than( + to2_performed: bool, + to0_max: i64, + conn: &mut PgConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) + .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn update_ov_to0_wait_seconds( + guid: &str, + wait_seconds: Option, + conn: &mut PgConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.eq(wait_seconds)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_to2( + guid: &str, + to2_performed: Option, + conn: &mut PgConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .execute(conn)?; + Ok(()) + } +} + +pub struct PostgresRendezvousDB {} + +impl DBStoreRendezvous for PostgresRendezvousDB { + fn get_connection() -> PgConnection { + dotenv().ok(); + let database_url = 
env::var("POSTGRES_RENDEZVOUS_DATABASE_URL") + .expect("POSTGRES_RENDEZVOUS_DATABASE_URL must be set"); + PgConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + dotenv().ok(); + let database_url = env::var("POSTGRES_RENDEZVOUS_DATABASE_URL") + .expect("POSTGRES_RENDEZVOUS_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &StoredItem, + guid: &str, + ttl: Option, + conn: &mut PgConnection, + ) -> Result<()> { + let new_ov_rendezvous = NewRendezvousOV { + guid: guid.to_string(), + contents: ov.serialize_data()?, + ttl, + }; + diesel::insert_into(super::schema::rendezvous_vouchers::table) + .values(&new_ov_rendezvous) + .execute(conn) + .expect("Error saving OV"); + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { + let result = super::schema::rendezvous_vouchers::dsl::rendezvous_vouchers + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .first(conn) + .expect("Error getting rendezvous OV"); + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut PgConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut PgConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut PgConnection) -> Result<()> { + diesel::update(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .set(super::schema::rendezvous_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} diff --git a/db/src/schema.rs 
b/db/src/schema.rs new file mode 100644 index 000000000..65e68897c --- /dev/null +++ b/db/src/schema.rs @@ -0,0 +1,24 @@ +diesel::table! { + manufacturer_vouchers (guid) { + guid -> Text, + contents -> Binary, + ttl -> Nullable, + } +} + +diesel::table! { + owner_vouchers (guid) { + guid -> Text, + contents -> Binary, + to2_performed -> Nullable, + to0_accept_owner_wait_seconds -> Nullable, + } +} + +diesel::table! { + rendezvous_vouchers (guid) { + guid -> Text, + contents -> Binary, + ttl -> Nullable, + } +} diff --git a/db/src/sqlite.rs b/db/src/sqlite.rs new file mode 100644 index 000000000..f40067074 --- /dev/null +++ b/db/src/sqlite.rs @@ -0,0 +1,530 @@ +use super::{DBStoreManufacturer, DBStoreOwner, DBStoreRendezvous}; + +use diesel::prelude::*; +use diesel::r2d2::ConnectionManager; +use diesel::r2d2::Pool; +use diesel::SqliteConnection; + +use crate::models::ManufacturerOV; +use crate::models::NewManufacturerOV; +use crate::schema::manufacturer_vouchers; +use crate::schema::owner_vouchers; +use crate::schema::rendezvous_vouchers; + +use std::env; + +use anyhow::Result; +use dotenvy::dotenv; + +use super::models::{NewOwnerOV, NewRendezvousOV, OwnerOV, RendezvousOV}; + +use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; +use fdo_data_formats::Serializable; +use fdo_data_formats::StoredItem; + +pub struct SqliteManufacturerDB {} + +impl DBStoreManufacturer for SqliteManufacturerDB { + fn get_connection() -> SqliteConnection { + dotenv().ok(); + let database_url = env::var("SQLITE_MANUFACTURER_DATABASE_URL") + .expect("SQLITE_MANUFACTURER_DATABASE_URL must be set"); + SqliteConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + dotenv().ok(); + let database_url = env::var("SQLITE_MANUFACTURER_DATABASE_URL") + .expect("SQLITE_MANUFACTURER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + 
.expect("Couldn't build db connection pool") + } + + fn insert_ov(ov: &OV, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { + let new_ov_manufacturer = NewManufacturerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data().expect("Error serializing OV"), + ttl, + }; + diesel::insert_into(super::schema::manufacturer_vouchers::table) + .values(new_ov_manufacturer) + .execute(conn) + .expect("Error saving OV"); + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .first(conn) + .expect("Error geting manufacturer OVs"); + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { + diesel::update(manufacturer_vouchers::dsl::manufacturer_vouchers) + .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) + .set(super::schema::manufacturer_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} + +pub struct SqliteOwnerDB {} + +impl DBStoreOwner for SqliteOwnerDB { + fn get_connection() -> SqliteConnection { + dotenv().ok(); + let database_url = + env::var("SQLITE_OWNER_DATABASE_URL").expect("SQLITE_OWNER_DATABASE_URL must be set"); + SqliteConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + dotenv().ok(); + let database_url = + 
env::var("SQLITE_OWNER_DATABASE_URL").expect("SQLITE_OWNER_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &OV, + to2: Option, + to0: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + let new_ov_owner = NewOwnerOV { + guid: ov.header().guid().to_string(), + contents: ov.serialize_data().expect("Error serializing OV"), + to2_performed: to2, + to0_accept_owner_wait_seconds: to0, + }; + diesel::insert_into(super::schema::owner_vouchers::table) + .values(new_ov_owner) + .execute(conn) + .expect("Error saving OV"); + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .first(conn) + .expect("Error getting owner OV"); + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + #[allow(non_snake_case)] + fn select_ov_to2_performed( + to2_performed: bool, + conn: &mut SqliteConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .select(OwnerOV::as_select()) + .load(conn) + .expect("Error getting owner OVs"); + Ok(result) + } + + #[allow(non_snake_case)] + fn select_ov_to0_less_than(to0_max: i64, conn: &mut SqliteConnection) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) + .select(OwnerOV::as_select()) + .load(conn) + .expect("Error getting owner OVs"); + Ok(result) + } + + fn select_ov_to2_performed_and_ov_to0_less_than( + to2_performed: bool, 
+ to0_max: i64, + conn: &mut SqliteConnection, + ) -> Result> { + let result = super::schema::owner_vouchers::dsl::owner_vouchers + .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) + .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .select(OwnerOV::as_select()) + .load(conn)?; + Ok(result) + } + + fn update_ov_to0_wait_seconds( + guid: &str, + wait_seconds: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.eq(wait_seconds)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_to2( + guid: &str, + to2_performed: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + diesel::update(owner_vouchers::dsl::owner_vouchers) + .filter(super::schema::owner_vouchers::guid.eq(guid)) + .set(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .execute(conn)?; + Ok(()) + } +} + +pub struct SqliteRendezvousDB {} + +impl DBStoreRendezvous for SqliteRendezvousDB { + fn get_connection() -> SqliteConnection { + dotenv().ok(); + let database_url = env::var("SQLITE_RENDEZVOUS_DATABASE_URL") + .expect("SQLITE_RENDEZVOUS_DATABASE_URL must be set"); + SqliteConnection::establish(&database_url).expect("Error connecting to database") + } + + fn get_conn_pool() -> Pool> { + dotenv().ok(); + let database_url = env::var("SQLITE_RENDEZVOUS_DATABASE_URL") + .expect("SQLITE_RENDEZVOUS_DATABASE_URL must be set"); + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Couldn't build db connection pool") + } + + fn insert_ov( + ov: &StoredItem, + guid: &str, + ttl: Option, + conn: &mut SqliteConnection, + ) -> Result<()> { + let new_ov_rendezvous = NewRendezvousOV { + guid: guid.to_string(), + contents: ov.serialize_data()?, + ttl, + }; + 
diesel::insert_into(super::schema::rendezvous_vouchers::table) + .values(&new_ov_rendezvous) + .execute(conn) + .expect("Error saving OV"); + Ok(()) + } + + fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { + let result = super::schema::rendezvous_vouchers::dsl::rendezvous_vouchers + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .first(conn) + .expect("Error getting rendezvous OV"); + Ok(result) + } + + fn delete_ov(guid: &str, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .execute(conn)?; + Ok(()) + } + + fn delete_ov_ttl_le(ttl: i64, conn: &mut SqliteConnection) -> Result<()> { + diesel::delete(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::ttl.le(ttl)) + .execute(conn)?; + Ok(()) + } + + fn update_ov_ttl(guid: &str, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { + diesel::update(rendezvous_vouchers::dsl::rendezvous_vouchers) + .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) + .set(super::schema::rendezvous_vouchers::ttl.eq(ttl)) + .execute(conn)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::{SqliteManufacturerDB, SqliteOwnerDB, SqliteRendezvousDB}; + use crate::{schema::*, DBStoreManufacturer, DBStoreOwner, DBStoreRendezvous}; + use anyhow::Result; + use diesel::connection::SimpleConnection; + use diesel::prelude::*; + use fdo_data_formats::ownershipvoucher::OwnershipVoucher as OV; + use fdo_data_formats::publickey::PublicKey; + use fdo_data_formats::types::{COSESign, Guid, Nonce, RendezvousInfo, TO2SetupDevicePayload}; + use fdo_data_formats::StoredItem; + use openssl::ec::{EcGroup, EcKey}; + use openssl::nid::Nid; + use openssl::pkey::PKey; + use std::collections::HashMap; + use std::env; + + #[test] + fn test_manufacturer_database() -> Result<()> { + println!("Current directory: {:?}", env::current_dir()); + + // read test ovs 
from the integration tests dir + let mut ov_map = HashMap::new(); + let pool = SqliteManufacturerDB::get_conn_pool(); + + // last_guid used later to delete an ov with that key + let mut last_guid = String::new(); + for path in std::fs::read_dir("../integration-tests/vouchers/v101").expect("Dir not found") + { + let ov_path = path.expect("error getting path").path(); + let content = std::fs::read(ov_path).expect("OV couldn't be read"); + let ov = OV::from_pem_or_raw(&content).expect("Error serializing OV"); + last_guid = ov.header().guid().to_string(); + ov_map.insert(ov.header().guid().to_string(), ov); + } + + // get a connection from the pool + let conn = &mut pool.get().unwrap(); + // sqlite does not enable this by default, not needed at this point, + // but I've left it here so that we don't forget + conn.batch_execute("PRAGMA foreign_keys = ON")?; + + for (_, ov) in ov_map.clone().into_iter() { + SqliteManufacturerDB::insert_ov(&ov, Some(5000_i64), conn)?; + } + + // we should have 3 ovs + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 3); + + // select ov by guid + let ov_db = SqliteManufacturerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.guid, last_guid); + + // update ttl of an OV + SqliteManufacturerDB::update_ov_ttl(&last_guid, Some(12345), conn)?; + let ov_db = SqliteManufacturerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.ttl, Some(12345)); + + // delete an ov by guid, we should have 2 at the end + SqliteManufacturerDB::delete_ov(&last_guid, conn)?; + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete manufacturer ovs with ttl <= 4000, we shouldn't delete any of them + SqliteManufacturerDB::delete_ov_ttl_le(4000_i64, conn)?; + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + 
// delete manufacturer ovs with ttl <= 5000, we should delete the remaining 2 ovs + SqliteManufacturerDB::delete_ov_ttl_le(5000_i64, conn)?; + let count: i64 = manufacturer_vouchers::dsl::manufacturer_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 0); + Ok(()) + } + + #[test] + fn test_owner_database() -> Result<()> { + println!("Current directory: {:?}", env::current_dir()); + + // read test ovs from the integration tests dir + let mut ov_map = HashMap::new(); + let pool = SqliteOwnerDB::get_conn_pool(); + + // last_guid used later to delete an ov with that key + let mut last_guid = String::new(); + for path in std::fs::read_dir("../integration-tests/vouchers/v101").expect("Dir not found") + { + let ov_path = path.expect("error getting path").path(); + let content = std::fs::read(ov_path).expect("OV couldn't be read"); + let ov = OV::from_pem_or_raw(&content).expect("Error serializing OV"); + last_guid = ov.header().guid().to_string(); + ov_map.insert(ov.header().guid().to_string(), ov); + } + + // get a connection from the pool + let conn = &mut pool.get().unwrap(); + // sqlite does not enable this by default, not needed at this point, + // but I've left it here so that we don't forget + conn.batch_execute("PRAGMA foreign_keys = ON")?; + + let mut to2_done = true; + for (_, ov) in ov_map.clone().into_iter() { + if to2_done { + SqliteOwnerDB::insert_ov(&ov, Some(to2_done), Some(2000_i64), conn)?; + } else { + SqliteOwnerDB::insert_ov(&ov, Some(to2_done), Some(3000_i64), conn)?; + } + to2_done = !to2_done; + } + + // we should have 3 ovs + let count: i64 = owner_vouchers::dsl::owner_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 3); + + // select ov by guid + let ov_db = SqliteOwnerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.guid, last_guid); + + // select the owner ovs with to2 performed = true, we should have 2 + let result = SqliteOwnerDB::select_ov_to2_performed(true, conn)?; + assert_eq!(result.len(), 
2); + + // select the owner ovs with to0 less than 2500, we should have 2 + let result = SqliteOwnerDB::select_ov_to0_less_than(2500_i64, conn)?; + assert_eq!(result.len(), 2); + + // update the wait_seconds field and to2 + SqliteOwnerDB::update_ov_to0_wait_seconds(&last_guid.to_string(), Some(1234), conn)?; + SqliteOwnerDB::update_ov_to2(&last_guid.to_string(), None, conn)?; + + let ov_db = SqliteOwnerDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.to0_accept_owner_wait_seconds, Some(1234)); + assert_eq!(ov_db.to2_performed, None); + + // delete an ov from the owner, we should have 2 left + SqliteOwnerDB::delete_ov(&last_guid.to_string(), conn)?; + let count: i64 = owner_vouchers::dsl::owner_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + Ok(()) + } + + #[test] + fn test_rendezvous_database() -> Result<()> { + println!("Current directory: {:?}", env::current_dir()); + + // read test ovs from the integration tests dir + let mut ov_map = HashMap::new(); + let pool = SqliteRendezvousDB::get_conn_pool(); + + // last_guid used later to delete an ov with that key + let mut last_guid = String::new(); + // private key + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?; + let key = EcKey::generate(&group)?; + let private_key = PKey::from_ec_key(key.clone())?; + for path in std::fs::read_dir("../integration-tests/vouchers/v101").expect("Dir not found") + { + let ov_path = path.expect("error getting path").path(); + let content = std::fs::read(ov_path).expect("OV couldn't be read"); + let ov = OV::from_pem_or_raw(&content).expect("Error serializing OV"); + last_guid = ov.header().guid().to_string(); + let pubkey: PublicKey = ov + .device_certificate_chain() + .unwrap() + .insecure_verify_without_root_verification() + .unwrap() + .clone() + .try_into() + .unwrap(); + let new_payload = TO2SetupDevicePayload::new( + RendezvousInfo::new(Vec::new()).unwrap(), + Guid::new().unwrap(), + Nonce::new().unwrap(), + pubkey.clone(), + ); + 
let cose = COSESign::new(&new_payload, None, &private_key).unwrap(); + let tmp = StoredItem { + public_key: pubkey, + to1d: cose, + }; + ov_map.insert(ov.header().guid().to_string(), tmp); + } + + // get a connection from the pool + let conn = &mut pool.get().unwrap(); + // sqlite does not enable this by default, not needed at this point, + // but I've left it here so that we don't forget + conn.batch_execute("PRAGMA foreign_keys = ON")?; + + for (guid, ov) in ov_map.clone().into_iter() { + SqliteRendezvousDB::insert_ov(&ov, &guid, Some(5000_i64), conn)?; + } + + // we should have 3 ovs + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 3); + + // get an ov by guid + let ov_db = SqliteRendezvousDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.guid, last_guid); + + // update ttl of an ov + SqliteRendezvousDB::update_ov_ttl(&last_guid, None, conn)?; + let ov_db = SqliteRendezvousDB::get_ov(&last_guid, conn)?; + assert_eq!(ov_db.ttl, None); + + // delete an ov by guid, we should have 2 at the end + SqliteRendezvousDB::delete_ov(&last_guid, conn)?; + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete rendezvous ovs with ttl <= 4000, we shouldn't delete any of them + SqliteRendezvousDB::delete_ov_ttl_le(4000_i64, conn)?; + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 2); + + // delete rendezvous ovs with ttl <= 5000, we should delete the remaining 2 ovs + SqliteRendezvousDB::delete_ov_ttl_le(5000_i64, conn)?; + let count: i64 = rendezvous_vouchers::dsl::rendezvous_vouchers + .count() + .get_result(conn) + .unwrap(); + assert_eq!(count, 0); + Ok(()) + } +} diff --git a/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql 
b/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..79f509d4d --- /dev/null +++ b/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` + +DROP TABLE manufacturer_vouchers; diff --git a/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..2233ba87a --- /dev/null +++ b/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here + +CREATE TABLE manufacturer_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents bytea NOT NULL, + ttl bigint +); diff --git a/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..e30e9d7d9 --- /dev/null +++ b/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql @@ -0,0 +1 @@ +DROP TABLE manufacturer_vouchers; diff --git a/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..a9abe4c0d --- /dev/null +++ b/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE manufacturer_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents blob NOT NULL, + ttl bigint +); diff --git a/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..d82e56978 --- /dev/null +++ b/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` + 
+DROP TABLE owner_vouchers; diff --git a/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..98c25d85a --- /dev/null +++ b/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,8 @@ +-- Your SQL goes here + +CREATE TABLE owner_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents bytea NOT NULL, + to2_performed boolean, + to0_accept_owner_wait_seconds bigint +); diff --git a/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..771908a38 --- /dev/null +++ b/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql @@ -0,0 +1 @@ +DROP TABLE owner_vouchers; diff --git a/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..b3f27ef6f --- /dev/null +++ b/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE owner_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents blob NOT NULL, + to2_performed bool, + to0_accept_owner_wait_seconds bigint +); diff --git a/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..c75aa021f --- /dev/null +++ b/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` + +DROP TABLE rendezvous_vouchers; diff --git a/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql new file mode 100644 
index 000000000..046166b84 --- /dev/null +++ b/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,7 @@ +-- Your SQL goes here + +CREATE TABLE rendezvous_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents bytea NOT NULL, + ttl bigint +); diff --git a/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql new file mode 100644 index 000000000..67864a8e9 --- /dev/null +++ b/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql @@ -0,0 +1 @@ +DROP TABLE rendezvous_vouchers; diff --git a/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql new file mode 100644 index 000000000..c55b893f1 --- /dev/null +++ b/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE rendezvous_vouchers ( + guid varchar(36) NOT NULL PRIMARY KEY, + contents blob NOT NULL, + ttl bigint +); From 299592f0fd3dc5a9bfcd15ffb19d7c5652c744df Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Mon, 2 Oct 2023 14:15:36 +0200 Subject: [PATCH 03/25] ci: update to handle DB tests Adds new dependencies and creates a sqlite database to be used in the tests. 
Signed-off-by: Irene Diez --- .github/workflows/ci.yml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5e4313fc0..8d8cab5c7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -87,7 +87,7 @@ jobs: steps: - name: Install deps run: | - dnf install -y make gcc openssl openssl-devel findutils golang git tpm2-tss-devel swtpm swtpm-tools git clevis clevis-luks cryptsetup cryptsetup-devel clang-devel cracklib-dicts + dnf install -y make gcc openssl openssl-devel findutils golang git tpm2-tss-devel swtpm swtpm-tools git clevis clevis-luks cryptsetup cryptsetup-devel clang-devel cracklib-dicts sqlite sqlite-devel libpq libpq-devel - uses: actions/checkout@v3 with: persist-credentials: false @@ -112,7 +112,19 @@ jobs: env: FDO_PRIVILEGED: true PER_DEVICE_SERVICEINFO: false - run: cargo test --workspace + SQLITE_MANUFACTURER_DATABASE_URL: ../ci-manufacturer-db.sqlite + SQLITE_OWNER_DATABASE_URL: ../ci-owner-db.sqlite + SQLITE_RENDEZVOUS_DATABASE_URL: ../ci-rendezvous-db.sqlite + run: | + # prep for database tests + cargo install --force diesel_cli --no-default-features --features sqlite + diesel migration run --migration-dir ./migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite + diesel migration run --migration-dir ./migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite + diesel migration run --migration-dir ./migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite + # run tests + cargo test --workspace + # delete sqlite databases + rm -f ./ci-manufacturer-db.sqlite ./ci-owner-db.sqlite ./ci-rendezvous-db.sqlite - name: Check aio run: | mkdir aio-dir/ From 8f378267d338fea6adee9e9be7d690a91262a89b Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Mon, 20 Nov 2023 16:33:30 +0100 Subject: [PATCH 04/25] feat: add the Manufacturer DB store This adds the store interface methods for the 
SqliteManufacturerStore. Signed-off-by: Irene Diez --- Cargo.lock | 5 +- store/Cargo.toml | 8 +++ store/src/db.rs | 157 +++++++++++++++++++++++++++++++++++++++++++++++ store/src/lib.rs | 6 ++ 4 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 store/src/db.rs diff --git a/Cargo.lock b/Cargo.lock index 498bef8ac..8e0bd93ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -672,7 +672,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d98235fdc2f355d330a8244184ab6b4b33c28679c0b4158f63138e51d6cf7e88" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "byteorder", "diesel_derives", "itoa", @@ -924,6 +924,7 @@ dependencies = [ "fdo-data-formats", "fdo-http-wrapper", "openssl", + "dotenvy", ] [[package]] @@ -1071,7 +1072,9 @@ name = "fdo-store" version = "0.4.13" dependencies = [ "async-trait", + "diesel", "fdo-data-formats", + "fdo-db", "log", "serde", "serde_cbor", diff --git a/store/Cargo.toml b/store/Cargo.toml index 3a8026f96..1774dadc3 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -20,5 +20,13 @@ time = "0.3" xattr = { version = "1.0", default-features = false, optional = true } # We *need* xattrs to store TTL serde_cbor = { version = "0.11", optional = true } +# database +fdo-db = { path = "../db", version = "0.4.12"} + +diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"], optional = true } + [features] directory = ["xattr", "serde_cbor"] +db = ["diesel"] + +default = ["directory", "db"] \ No newline at end of file diff --git a/store/src/db.rs b/store/src/db.rs new file mode 100644 index 000000000..c70c62b72 --- /dev/null +++ b/store/src/db.rs @@ -0,0 +1,157 @@ +use async_trait::async_trait; +use fdo_data_formats::ownershipvoucher::OwnershipVoucher; +use fdo_db::*; +use std::marker::PhantomData; + +use crate::Store; +use crate::StoreError; +use crate::{FilterType, MetadataLocalKey, MetadataValue, ValueIter}; +use fdo_data_formats::Serializable; + +struct 
SqliteManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteManufacturerStore where K: std::string::ToString {} + +pub(super) fn initialize() -> Result>, StoreError> +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + Ok(Box::new(SqliteManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })) +} + +pub struct SqliteManufacturerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for SqliteManufacturerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for SqliteManufacturerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::sqlite::SqliteManufacturerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = 
fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::sqlite::SqliteManufacturerDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Unspecified(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteManufacturerDB::update_ov_ttl(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Unspecified(format!( + "Unable to set 'None' metadata on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the manufacturing server. + // This is why we are returning dummy things to comply with the trait. 
+ Ok(Box::new(SqliteManufacturerStoreFilterType { + neqs: Vec::new(), + lts: Vec::new(), + })) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::sqlite::SqliteManufacturerDB::insert_ov(&ov, None, conn).map_err(|e| { + StoreError::Unspecified(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteManufacturerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Unspecified(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::sqlite::SqliteManufacturerDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Unspecified(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} diff --git a/store/src/lib.rs b/store/src/lib.rs index 5bf5a9a27..978fd1df4 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -220,8 +220,12 @@ mod directory; pub enum StoreConfig { #[cfg(feature = "directory")] Directory { path: std::path::PathBuf }, + #[cfg(feature = "db")] + DataBase {}, } +mod db; + impl StoreConfig { pub fn initialize(&self) -> Result>, StoreError> where @@ -234,6 +238,8 @@ impl StoreConfig { match self { #[cfg(feature = "directory")] 
StoreConfig::Directory { path } => directory::initialize(path), + #[cfg(feature = "db")] + StoreConfig::DataBase {} => db::initialize(), } } } From ed65dfe01cf4b3316215773758edabc7c5c8930b Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Tue, 24 Oct 2023 17:43:46 +0200 Subject: [PATCH 05/25] feat: don't allow db settings in per-device config Signed-off-by: Irene Diez --- util/src/servers/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/util/src/servers/mod.rs b/util/src/servers/mod.rs index 836e47494..fb4fc702b 100644 --- a/util/src/servers/mod.rs +++ b/util/src/servers/mod.rs @@ -74,6 +74,9 @@ pub fn settings_per_device(guid: &str) -> Result { path.push(file_name); path.to_string_lossy().into_owned() } + StoreConfig::DataBase {} => { + bail!("Per-device settings with database not implemented yet"); + } }; let config = Config::builder() .add_source(config::File::from(Path::new(&path_per_device_store))) From 55189dee898227798b33cdbb15b2694d150e7261 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Wed, 25 Oct 2023 18:08:46 +0200 Subject: [PATCH 06/25] feat: add the Owner DB store configuration This adds the store trait for the Sqlite Owner DB. 
Signed-off-by: Irene Diez --- Cargo.lock | 1 + store/Cargo.toml | 4 +- store/src/db.rs | 156 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 160 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 8e0bd93ad..0ba152f2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1071,6 +1071,7 @@ dependencies = [ name = "fdo-store" version = "0.4.13" dependencies = [ + "anyhow", "async-trait", "diesel", "fdo-data-formats", diff --git a/store/Cargo.toml b/store/Cargo.toml index 1774dadc3..cb838d327 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -7,6 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +anyhow = { version = "1", optional = true} + fdo-data-formats = { path = "../data-formats", version = "0.4.13" } thiserror = "1" @@ -27,6 +29,6 @@ diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"], optiona [features] directory = ["xattr", "serde_cbor"] -db = ["diesel"] +db = ["diesel", "anyhow"] default = ["directory", "db"] \ No newline at end of file diff --git a/store/src/db.rs b/store/src/db.rs index c70c62b72..d9276a482 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -1,3 +1,4 @@ +use anyhow::bail; use async_trait::async_trait; use fdo_data_formats::ownershipvoucher::OwnershipVoucher; use fdo_db::*; @@ -155,3 +156,158 @@ where }) } } + +struct SqliteOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteOwnerStore where K: std::string::ToString {} + +pub struct SqliteOwnerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for SqliteOwnerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, 
StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for SqliteOwnerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::sqlite::SqliteOwnerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + match metadata_key.to_key() { + "fdo.to2_performed" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to bool"); + fdo_db::sqlite::SqliteOwnerDB::update_ov_to2(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Unspecified(format!( + "Unable to update OV (guid {}) to2 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + "fdo.to0_accept_owner_wait_seconds" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to i64"); + fdo_db::sqlite::SqliteOwnerDB::update_ov_to0_wait_seconds( + &key.to_string(), + Some(val), + conn, + ) + .map_err(|e| { + StoreError::Unspecified(format!( + "Unable to update OV (guid {}) to0 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + _ => Err(StoreError::Unspecified(format!( + "Unable to hanlde metadata key {}", + metadata_key.to_key() + 
))), + } + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteOwnerDB::update_ov_to0_wait_seconds(&key.to_string(), None, conn) + .map_err(|e| { + StoreError::Unspecified(format!( + "Unable to set 'None' to0 metadata on OV {}: {e:?}", + key.to_string() + )) + })?; + fdo_db::sqlite::SqliteOwnerDB::update_ov_to2(&key.to_string(), None, conn).map_err(|e| { + StoreError::Unspecified(format!( + "Unable to set 'None' to2 metadata on OV {}: {e:?}", + key.to_string() + )) + }) + } + + async fn query_data(&self) -> crate::QueryResult { + todo!(); + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::sqlite::SqliteOwnerDB::insert_ov(&ov, None, None, conn).map_err(|e| { + StoreError::Unspecified(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteOwnerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Unspecified(format!( + "Error deleting OV with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + // This is not used in the owner onboarding server since the OVs there + // do not have a ttl. 
+ Ok(()) + } +} From e4f9de1d2045c2655f62643370b76c88d96f4fb5 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Fri, 27 Oct 2023 11:24:02 +0200 Subject: [PATCH 07/25] feat: add the Rendezvous DB store configuration This implements the store trait for the Sqlite Rendezvous DB. Signed-off-by: Irene Diez --- store/src/db.rs | 135 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/store/src/db.rs b/store/src/db.rs index d9276a482..bd6a2c65a 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -311,3 +311,138 @@ where Ok(()) } } + +struct SqliteRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteRendezvousStore where K: std::string::ToString {} + +pub struct SqliteRendezvousStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for SqliteRendezvousStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for SqliteRendezvousStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::sqlite::SqliteRendezvousDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing 
value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::sqlite::SqliteRendezvousDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Unspecified(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteRendezvousDB::update_ov_ttl(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Unspecified(format!( + "Unable to set 'None' ttl on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the rendezvous server. + // This is why we are returning dummy things to comply with the trait. 
+ Ok(Box::new(SqliteRendezvousStoreFilterType { + neqs: Vec::new(), + lts: Vec::new(), + })) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::sqlite::SqliteRendezvousDB::insert_ov(&ov, None, conn).map_err(|e| { + StoreError::Unspecified(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::sqlite::SqliteRendezvousDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Unspecified(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::sqlite::SqliteRendezvousDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Unspecified(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} From 202edc0c3f39ba22eccab879748b65ba9228384f Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Mon, 30 Oct 2023 17:19:39 +0100 Subject: [PATCH 08/25] feat(store): change store initializations based on DB type When selecting to use the DB as the store type in the configuration files, this requires to select the DB type (sqlite or postgres) and the server type. 
Signed-off-by: Irene Diez --- store/src/db.rs | 28 +++++++++++++++++++++++----- store/src/lib.rs | 21 +++++++++++++++++++-- util/src/servers/mod.rs | 7 +++++-- 3 files changed, 47 insertions(+), 9 deletions(-) diff --git a/store/src/db.rs b/store/src/db.rs index bd6a2c65a..288896ea0 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -4,6 +4,8 @@ use fdo_data_formats::ownershipvoucher::OwnershipVoucher; use fdo_db::*; use std::marker::PhantomData; +use crate::DBType; +use crate::ServerType; use crate::Store; use crate::StoreError; use crate::{FilterType, MetadataLocalKey, MetadataValue, ValueIter}; @@ -16,17 +18,33 @@ struct SqliteManufacturerStore { impl SqliteManufacturerStore where K: std::string::ToString {} -pub(super) fn initialize() -> Result>, StoreError> +pub(super) fn initialize( + db_type: DBType, + server_type: &ServerType, +) -> Result>, StoreError> where OT: crate::StoreOpenMode, K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, V: Serializable + Send + Sync + Clone + 'static, MKT: crate::MetadataLocalKey + 'static, { - Ok(Box::new(SqliteManufacturerStore { - phantom_k: PhantomData, - phantom_v: PhantomData, - })) + match db_type { + DBType::Postgres => todo!(), + DBType::Sqlite => match server_type { + ServerType::Manufacturer => Ok(Box::new(SqliteManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Owner => Ok(Box::new(SqliteOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Rendezvous => Ok(Box::new(SqliteRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + }, + } } pub struct SqliteManufacturerStoreFilterType { diff --git a/store/src/lib.rs b/store/src/lib.rs index 978fd1df4..40df43516 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -216,12 +216,27 @@ pub trait Store: Send + Sync { #[cfg(feature = "directory")] mod directory; +#[derive(Debug, Serialize, Deserialize)] +pub enum DBType { + Sqlite, + Postgres, +} + 
+#[derive(Debug, Serialize, Deserialize)] +pub enum ServerType { + Manufacturer, + Owner, + Rendezvous, +} + #[derive(Debug, Serialize, Deserialize)] pub enum StoreConfig { #[cfg(feature = "directory")] Directory { path: std::path::PathBuf }, #[cfg(feature = "db")] - DataBase {}, + Sqlite(ServerType), + #[cfg(feature = "db")] + Postgres(ServerType), } mod db; @@ -239,7 +254,9 @@ impl StoreConfig { #[cfg(feature = "directory")] StoreConfig::Directory { path } => directory::initialize(path), #[cfg(feature = "db")] - StoreConfig::DataBase {} => db::initialize(), + StoreConfig::Sqlite(server) => db::initialize(DBType::Sqlite, server), + #[cfg(feature = "db")] + StoreConfig::Postgres(server) => db::initialize(DBType::Postgres, server), } } } diff --git a/util/src/servers/mod.rs b/util/src/servers/mod.rs index fb4fc702b..8805238b8 100644 --- a/util/src/servers/mod.rs +++ b/util/src/servers/mod.rs @@ -74,8 +74,11 @@ pub fn settings_per_device(guid: &str) -> Result { path.push(file_name); path.to_string_lossy().into_owned() } - StoreConfig::DataBase {} => { - bail!("Per-device settings with database not implemented yet"); + StoreConfig::Sqlite(_) => { + bail!("Per-device settings with sqlite database not implemented"); + } + StoreConfig::Postgres(_) => { + bail!("Per-device settings with Postgres database not implemented"); } }; let config = Config::builder() From 4982d096d8c7d73db21df0ce3deaaaecf68fe420 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Wed, 22 Nov 2023 11:47:30 +0100 Subject: [PATCH 09/25] fix(data-formats): error when trying to parse an empty slice When we are trying to parse raw data to make an ownership voucher we did not check whether the slice was empty or not, causing a panic in such case. This change explicitly checks if the slice is empty or not, and in the former case returns an error. 
Signed-off-by: Irene Diez --- data-formats/src/errors.rs | 2 ++ data-formats/src/ownershipvoucher.rs | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/data-formats/src/errors.rs b/data-formats/src/errors.rs index bb7077c02..5320ae2d9 100644 --- a/data-formats/src/errors.rs +++ b/data-formats/src/errors.rs @@ -71,4 +71,6 @@ pub enum Error { UnsupportedVersion(Option), #[error("TPM/TSS error: {0:?}")] TssError(#[from] tss_esapi::Error), + #[error("Empty data")] + EmptyData, } diff --git a/data-formats/src/ownershipvoucher.rs b/data-formats/src/ownershipvoucher.rs index 409126076..e1dd9fb29 100644 --- a/data-formats/src/ownershipvoucher.rs +++ b/data-formats/src/ownershipvoucher.rs @@ -175,6 +175,9 @@ impl OwnershipVoucher { } pub fn from_pem(data: &[u8]) -> Result { + if data.is_empty() { + return Err(Error::EmptyData); + } let parsed = pem::parse(data)?; if parsed.tag() != VOUCHER_PEM_TAG { return Err(Error::InvalidPemTag(parsed.tag().to_string())); @@ -183,6 +186,9 @@ impl OwnershipVoucher { } pub fn many_from_pem(data: &[u8]) -> Result> { + if data.is_empty() { + return Err(Error::EmptyData); + } pem::parse_many(data)? .into_iter() .map(|parsed| { @@ -196,6 +202,9 @@ impl OwnershipVoucher { } pub fn from_pem_or_raw(data: &[u8]) -> Result { + if data.is_empty() { + return Err(Error::EmptyData); + } if data[0] == data[1] && data[0] == b'-' { Self::from_pem(data) } else { From 1eeea64d125cbfcc5759c254aebe4408be61e03d Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Thu, 2 Nov 2023 17:59:37 +0100 Subject: [PATCH 10/25] feat(owner-tool): add an option to export OVs from the Manufacturer DB This adds the `export_manufacturer_vouchers` option to the Owner CLI, which allows to export OVs from the Manufacturer Server's DB to a directory. The options are to export a single OV by GUID or all of them. Updated the DB trait to add the `select *` query that yields all the OVs in the Manufacturer. 
Signed-off-by: Irene Diez --- Cargo.lock | 1 + db/src/lib.rs | 3 ++ db/src/postgres.rs | 10 ++++- db/src/sqlite.rs | 10 ++++- owner-tool/Cargo.toml | 1 + owner-tool/src/main.rs | 84 +++++++++++++++++++++++++++++++++++++++++- 6 files changed, 106 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0ba152f2e..7cf074db8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1018,6 +1018,7 @@ dependencies = [ "anyhow", "clap 4.3.0", "fdo-data-formats", + "fdo-db", "fdo-http-wrapper", "fdo-util", "hex", diff --git a/db/src/lib.rs b/db/src/lib.rs index 949210da0..68414aaf3 100644 --- a/db/src/lib.rs +++ b/db/src/lib.rs @@ -31,6 +31,9 @@ where /// Gets an OV fn get_ov(guid: &str, conn: &mut T) -> Result; + /// Returns all the OVs in the DB + fn get_all_ovs(conn: &mut T) -> Result>; + /// Deletes an OV fn delete_ov(guid: &str, conn: &mut T) -> Result<()>; diff --git a/db/src/postgres.rs b/db/src/postgres.rs index 691c63f00..95598434c 100644 --- a/db/src/postgres.rs +++ b/db/src/postgres.rs @@ -58,7 +58,15 @@ impl DBStoreManufacturer for PostgresManufacturerDB { let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) .first(conn) - .expect("Error geting manufacturer OVs"); + .expect("Error geting manufacturer OV"); + Ok(result) + } + + fn get_all_ovs(conn: &mut PgConnection) -> Result> { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .select(ManufacturerOV::as_select()) + .load(conn) + .expect("Error getting manufacturer OVs"); Ok(result) } diff --git a/db/src/sqlite.rs b/db/src/sqlite.rs index f40067074..b949350d8 100644 --- a/db/src/sqlite.rs +++ b/db/src/sqlite.rs @@ -60,7 +60,15 @@ impl DBStoreManufacturer for SqliteManufacturerDB { let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) .first(conn) - .expect("Error geting manufacturer OVs"); + 
.expect(&format!("Error geting manufacturer OV {guid}")); + Ok(result) + } + + fn get_all_ovs(conn: &mut SqliteConnection) -> Result> { + let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers + .select(ManufacturerOV::as_select()) + .load(conn) + .expect("Error getting manufacturer OVs"); Ok(result) } diff --git a/owner-tool/Cargo.toml b/owner-tool/Cargo.toml index 56a0ddd7e..bef1b0579 100644 --- a/owner-tool/Cargo.toml +++ b/owner-tool/Cargo.toml @@ -19,5 +19,6 @@ tss-esapi = { version = "7.4", features = ["generate-bindings"] } fdo-util = { path = "../util", version = "0.4.13" } fdo-data-formats = { path = "../data-formats", version = "0.4.13" } fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.13", features = ["client"] } +fdo-db = { path = "../db", version = "0.4.12"} hex = "0.4" diff --git a/owner-tool/src/main.rs b/owner-tool/src/main.rs index f22d94b6f..abb6970be 100644 --- a/owner-tool/src/main.rs +++ b/owner-tool/src/main.rs @@ -1,7 +1,9 @@ -use std::{convert::TryFrom, fs, io::Write, path::Path, str::FromStr}; +use std::{convert::TryFrom, env, fs, io::Write, path::Path, str::FromStr}; use anyhow::{bail, Context, Error, Result}; use clap::{ArgAction, Args, Parser, Subcommand, ValueEnum}; +use fdo_db::models::ManufacturerOV; +use fdo_db::{postgres::PostgresManufacturerDB, sqlite::SqliteManufacturerDB, DBStoreManufacturer}; use openssl::{ asn1::{Asn1Integer, Asn1Time}, bn::BigNum, @@ -14,6 +16,7 @@ use openssl::{ x509::{X509Builder, X509NameBuilder, X509NameRef, X509}, }; use serde_yaml::Value; +use std::fs::File; use tss_esapi::{structures::Public as TssPublic, traits::UnMarshall}; use fdo_data_formats::{ @@ -42,6 +45,8 @@ enum Commands { DumpDeviceCredential(DumpDeviceCredentialArguments), /// Extends an ownership voucher for a new owner ExtendOwnershipVoucher(ExtendOwnershipVoucherArguments), + /// Exports a single or all the ownership vouchers present in the Manufacturer DB + 
ExportManufacturerVouchers(ExportManufacturerVouchersArguments), } #[derive(Args)] @@ -99,6 +104,24 @@ struct ExtendOwnershipVoucherArguments { new_owner_cert: String, } +#[derive(Args)] +struct ExportManufacturerVouchersArguments { + /// Type of the Manufacturer DB holding the OVs + db_type: DBType, + /// DB connection URL or path to the DB file + db_url: String, + /// Path to dir where the OVs will be exported + path: String, + /// GUID of the voucher to be exported, if no GUID is given all the OVs will be exported + guid: Option, +} + +#[derive(Copy, Clone, ValueEnum)] +enum DBType { + Sqlite, + Postgres, +} + #[tokio::main] async fn main() -> Result<()> { fdo_util::add_version!(); @@ -109,6 +132,7 @@ async fn main() -> Result<()> { Commands::DumpOwnershipVoucher(args) => dump_voucher(&args), Commands::DumpDeviceCredential(args) => dump_devcred(&args), Commands::ExtendOwnershipVoucher(args) => extend_voucher(&args), + Commands::ExportManufacturerVouchers(args) => export_manufacturer_vouchers(&args), } } @@ -588,3 +612,61 @@ fn extend_voucher(args: &ExtendOwnershipVoucherArguments) -> Result<(), Error> { Ok(()) } + +fn _write_ov_to_disk(db_ov: &ManufacturerOV, path: &Path) -> Result<()> { + let new_path = path.join(&db_ov.guid); + let file = File::create(new_path)?; + let ov = OwnershipVoucher::from_pem_or_raw(&db_ov.contents).expect("Error serializing OV"); + OwnershipVoucher::serialize_to_writer(&ov, &file)?; + Ok(()) +} + +fn export_manufacturer_vouchers(args: &ExportManufacturerVouchersArguments) -> Result<()> { + let path = Path::new(&args.path); + if !path.is_dir() { + bail!("Please provide a path to a valid directory."); + } + match &args.guid { + Some(guid) => { + // export single + let db_ov = match args.db_type { + DBType::Sqlite => { + env::set_var("SQLITE_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + SqliteManufacturerDB::get_ov(guid, conn)? 
+ } + DBType::Postgres => { + env::set_var("POSTGRES_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + PostgresManufacturerDB::get_ov(guid, conn)? + } + }; + _write_ov_to_disk(&db_ov, path)?; + println!("OV {guid} exported.") + } + None => { + // export all + let db_ovs = match args.db_type { + DBType::Sqlite => { + env::set_var("SQLITE_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = SqliteManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + SqliteManufacturerDB::get_all_ovs(conn)? + } + DBType::Postgres => { + env::set_var("POSTGRES_MANUFACTURER_DATABASE_URL", &args.db_url); + let pool = PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get()?; + PostgresManufacturerDB::get_all_ovs(conn)? + } + }; + for db_ov in db_ovs { + _write_ov_to_disk(&db_ov, path)?; + } + println!("OV/s exported."); + } + } + Ok(()) +} From 4da0a49bf9d7f272b86b07e009b0dde3bfc5600d Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Mon, 6 Nov 2023 17:43:05 +0100 Subject: [PATCH 11/25] feat(owner-tool): add an option to import OVs to the Owner DB Signed-off-by: Irene Diez --- owner-tool/src/main.rs | 124 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/owner-tool/src/main.rs b/owner-tool/src/main.rs index abb6970be..7dcefadd1 100644 --- a/owner-tool/src/main.rs +++ b/owner-tool/src/main.rs @@ -3,6 +3,9 @@ use std::{convert::TryFrom, env, fs, io::Write, path::Path, str::FromStr}; use anyhow::{bail, Context, Error, Result}; use clap::{ArgAction, Args, Parser, Subcommand, ValueEnum}; use fdo_db::models::ManufacturerOV; +use fdo_db::postgres::PostgresOwnerDB; +use fdo_db::sqlite::SqliteOwnerDB; +use fdo_db::DBStoreOwner; use fdo_db::{postgres::PostgresManufacturerDB, sqlite::SqliteManufacturerDB, DBStoreManufacturer}; use openssl::{ asn1::{Asn1Integer, Asn1Time}, @@ -47,6 +50,8 @@ enum Commands { 
ExtendOwnershipVoucher(ExtendOwnershipVoucherArguments), /// Exports a single or all the ownership vouchers present in the Manufacturer DB ExportManufacturerVouchers(ExportManufacturerVouchersArguments), + /// Imports into the Owner DB a single ownership voucher or all the ownership vouchers present at a given path + ImportOwnershipVouchers(ImportOwnershipVouchersArguments), } #[derive(Args)] @@ -122,6 +127,16 @@ enum DBType { Postgres, } +#[derive(Args)] +struct ImportOwnershipVouchersArguments { + /// Type of the Owner DB to import the OVs + db_type: DBType, + /// DB connection URL or path to DB file + db_url: String, + /// Path to the OV to be imported, or path to a directory where all the OVs to be imported are located + source_path: String, +} + #[tokio::main] async fn main() -> Result<()> { fdo_util::add_version!(); @@ -133,6 +148,7 @@ async fn main() -> Result<()> { Commands::DumpDeviceCredential(args) => dump_devcred(&args), Commands::ExtendOwnershipVoucher(args) => extend_voucher(&args), Commands::ExportManufacturerVouchers(args) => export_manufacturer_vouchers(&args), + Commands::ImportOwnershipVouchers(args) => import_ownership_vouchers(&args), } } @@ -670,3 +686,111 @@ fn export_manufacturer_vouchers(args: &ExportManufacturerVouchersArguments) -> R } Ok(()) } + +fn import_ownership_vouchers(args: &ImportOwnershipVouchersArguments) -> Result<()> { + let source_path = Path::new(&args.source_path); + let mut error_buff: Vec = vec![]; + if source_path.is_dir() { + // Import all the OVs in a directory, we will read them one by one and + // insert them, if there is an error, we will copy it in a buffer and + // log it afterwards. + for path in fs::read_dir(source_path)? 
{ + let ov_path = match &path { + Ok(path) => path.path(), + Err(e) => { + error_buff.push(format!("Error {e} with path {:?}", &path)); + continue; + } + }; + let content = match fs::read(&ov_path) { + Ok(value) => value, + Err(e) => { + error_buff.push(format!("Error {e} reading path {:?}", &ov_path)); + continue; + } + }; + let ov = match OwnershipVoucher::from_pem_or_raw(&content) { + Ok(value) => value, + Err(e) => { + error_buff.push(format!( + "Error {e} serializing OV contents at path {:?}", + &ov_path + )); + continue; + } + }; + let ret = match args.db_type { + DBType::Postgres => { + env::set_var("POSTGRES_OWNER_DATABASE_URL", &args.db_url); + let pool = PostgresOwnerDB::get_conn_pool(); + let conn = &mut match pool.get() { + Ok(val) => val, + Err(e) => { + error_buff.push(format!( + "Error {e} getting a connection from the DB pool with OV {} from path {:?}", + ov.header().guid().to_string(), + &ov_path + )); + continue; + } + }; + PostgresOwnerDB::insert_ov(&ov, None, None, conn) + } + DBType::Sqlite => { + env::set_var("SQLITE_OWNER_DATABASE_URL", &args.db_url); + let pool = SqliteOwnerDB::get_conn_pool(); + let conn = &mut match pool.get() { + Ok(val) => val, + Err(e) => { + error_buff.push(format!( + "Error {e} getting a connection from the DB pool with OV {} from path {:?}", + ov.header().guid().to_string(), + &ov_path + )); + continue; + } + }; + SqliteOwnerDB::insert_ov(&ov, None, None, conn) + } + }; + if ret.is_err() { + error_buff.push(format!( + "Error {:?} inserting OV {} from path {:?}", + ret.err(), + ov.header().guid().to_string(), + &ov_path + )); + } + } + if !error_buff.is_empty() { + println!( + "Unable to import all OVs. 
OV import operations yielded the following error/s:" + ); + for error in error_buff { + println!("- {error}"); + } + } else { + println!("OV import finished.") + } + } else { + // import a single OV + let content = fs::read(&args.source_path)?; + let ov = OwnershipVoucher::from_pem_or_raw(&content)?; + match args.db_type { + DBType::Postgres => { + env::set_var("POSTGRES_OWNER_DATABASE_URL", &args.db_url); + let pool = PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get()?; + PostgresOwnerDB::insert_ov(&ov, None, None, conn)?; + } + DBType::Sqlite => { + env::set_var("SQLITE_OWNER_DATABASE_URL", &args.db_url); + let pool = SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool.get()?; + SqliteOwnerDB::insert_ov(&ov, None, None, conn)?; + } + } + println!("OV import finished."); + } + Ok(()) +} From 35369a92c6f7123820f1e38993fceb23214b1488 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Tue, 7 Nov 2023 12:51:32 +0100 Subject: [PATCH 12/25] fix(db): do not bail on non-critical errors We are going to propagate the errors unless those are related to getting a connection to the DB. 
Signed-off-by: Irene Diez --- db/src/postgres.rs | 31 +++++++++++-------------------- db/src/sqlite.rs | 31 +++++++++++-------------------- 2 files changed, 22 insertions(+), 40 deletions(-) diff --git a/db/src/postgres.rs b/db/src/postgres.rs index 95598434c..169705411 100644 --- a/db/src/postgres.rs +++ b/db/src/postgres.rs @@ -44,29 +44,26 @@ impl DBStoreManufacturer for PostgresManufacturerDB { fn insert_ov(ov: &OV, ttl: Option, conn: &mut PgConnection) -> Result<()> { let new_ov_manufacturer = NewManufacturerOV { guid: ov.header().guid().to_string(), - contents: ov.serialize_data().expect("Error serializing OV"), + contents: ov.serialize_data()?, ttl, }; diesel::insert_into(super::schema::manufacturer_vouchers::table) .values(new_ov_manufacturer) - .execute(conn) - .expect("Error saving OV"); + .execute(conn)?; Ok(()) } fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) - .first(conn) - .expect("Error geting manufacturer OV"); + .first(conn)?; Ok(result) } fn get_all_ovs(conn: &mut PgConnection) -> Result> { let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers .select(ManufacturerOV::as_select()) - .load(conn) - .expect("Error getting manufacturer OVs"); + .load(conn)?; Ok(result) } @@ -122,22 +119,20 @@ impl DBStoreOwner for PostgresOwnerDB { ) -> Result<()> { let new_ov_owner = NewOwnerOV { guid: ov.header().guid().to_string(), - contents: ov.serialize_data().expect("Error serializing OV"), + contents: ov.serialize_data()?, to2_performed: to2, to0_accept_owner_wait_seconds: to0, }; diesel::insert_into(super::schema::owner_vouchers::table) .values(new_ov_owner) - .execute(conn) - .expect("Error saving OV"); + .execute(conn)?; Ok(()) } fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { let result = super::schema::owner_vouchers::dsl::owner_vouchers 
.filter(super::schema::owner_vouchers::guid.eq(guid)) - .first(conn) - .expect("Error getting owner OV"); + .first(conn)?; Ok(result) } @@ -156,8 +151,7 @@ impl DBStoreOwner for PostgresOwnerDB { let result = super::schema::owner_vouchers::dsl::owner_vouchers .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) .select(OwnerOV::as_select()) - .load(conn) - .expect("Error getting owner OVs"); + .load(conn)?; Ok(result) } @@ -166,8 +160,7 @@ impl DBStoreOwner for PostgresOwnerDB { let result = super::schema::owner_vouchers::dsl::owner_vouchers .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) .select(OwnerOV::as_select()) - .load(conn) - .expect("Error getting owner OVs"); + .load(conn)?; Ok(result) } @@ -243,16 +236,14 @@ impl DBStoreRendezvous for PostgresRendezvousDB { }; diesel::insert_into(super::schema::rendezvous_vouchers::table) .values(&new_ov_rendezvous) - .execute(conn) - .expect("Error saving OV"); + .execute(conn)?; Ok(()) } fn get_ov(guid: &str, conn: &mut PgConnection) -> Result { let result = super::schema::rendezvous_vouchers::dsl::rendezvous_vouchers .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) - .first(conn) - .expect("Error getting rendezvous OV"); + .first(conn)?; Ok(result) } diff --git a/db/src/sqlite.rs b/db/src/sqlite.rs index b949350d8..10f1021ad 100644 --- a/db/src/sqlite.rs +++ b/db/src/sqlite.rs @@ -46,29 +46,26 @@ impl DBStoreManufacturer for SqliteManufacturerDB { fn insert_ov(ov: &OV, ttl: Option, conn: &mut SqliteConnection) -> Result<()> { let new_ov_manufacturer = NewManufacturerOV { guid: ov.header().guid().to_string(), - contents: ov.serialize_data().expect("Error serializing OV"), + contents: ov.serialize_data()?, ttl, }; diesel::insert_into(super::schema::manufacturer_vouchers::table) .values(new_ov_manufacturer) - .execute(conn) - .expect("Error saving OV"); + .execute(conn)?; Ok(()) } fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { let result = 
super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers .filter(super::schema::manufacturer_vouchers::guid.eq(guid)) - .first(conn) - .expect(&format!("Error geting manufacturer OV {guid}")); + .first(conn)?; Ok(result) } fn get_all_ovs(conn: &mut SqliteConnection) -> Result> { let result = super::schema::manufacturer_vouchers::dsl::manufacturer_vouchers .select(ManufacturerOV::as_select()) - .load(conn) - .expect("Error getting manufacturer OVs"); + .load(conn)?; Ok(result) } @@ -124,22 +121,20 @@ impl DBStoreOwner for SqliteOwnerDB { ) -> Result<()> { let new_ov_owner = NewOwnerOV { guid: ov.header().guid().to_string(), - contents: ov.serialize_data().expect("Error serializing OV"), + contents: ov.serialize_data()?, to2_performed: to2, to0_accept_owner_wait_seconds: to0, }; diesel::insert_into(super::schema::owner_vouchers::table) .values(new_ov_owner) - .execute(conn) - .expect("Error saving OV"); + .execute(conn)?; Ok(()) } fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { let result = super::schema::owner_vouchers::dsl::owner_vouchers .filter(super::schema::owner_vouchers::guid.eq(guid)) - .first(conn) - .expect("Error getting owner OV"); + .first(conn)?; Ok(result) } @@ -158,8 +153,7 @@ impl DBStoreOwner for SqliteOwnerDB { let result = super::schema::owner_vouchers::dsl::owner_vouchers .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) .select(OwnerOV::as_select()) - .load(conn) - .expect("Error getting owner OVs"); + .load(conn)?; Ok(result) } @@ -168,8 +162,7 @@ impl DBStoreOwner for SqliteOwnerDB { let result = super::schema::owner_vouchers::dsl::owner_vouchers .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) .select(OwnerOV::as_select()) - .load(conn) - .expect("Error getting owner OVs"); + .load(conn)?; Ok(result) } @@ -245,16 +238,14 @@ impl DBStoreRendezvous for SqliteRendezvousDB { }; diesel::insert_into(super::schema::rendezvous_vouchers::table) .values(&new_ov_rendezvous) - 
.execute(conn) - .expect("Error saving OV"); + .execute(conn)?; Ok(()) } fn get_ov(guid: &str, conn: &mut SqliteConnection) -> Result { let result = super::schema::rendezvous_vouchers::dsl::rendezvous_vouchers .filter(super::schema::rendezvous_vouchers::guid.eq(guid)) - .first(conn) - .expect("Error getting rendezvous OV"); + .first(conn)?; Ok(result) } From 1ddbe3a900aa225398051c8e9d2457c399f64640 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Tue, 7 Nov 2023 12:56:44 +0100 Subject: [PATCH 13/25] fix(db): remove dotenv handling Signed-off-by: Irene Diez --- Cargo.lock | 7 ------- db/Cargo.toml | 1 - db/src/postgres.rs | 7 ------- db/src/sqlite.rs | 7 ------- 4 files changed, 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7cf074db8..3c7c7583f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -734,12 +734,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" -[[package]] -name = "dotenvy" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" - [[package]] name = "either" version = "1.8.1" @@ -924,7 +918,6 @@ dependencies = [ "fdo-data-formats", "fdo-http-wrapper", "openssl", - "dotenvy", ] [[package]] diff --git a/db/Cargo.toml b/db/Cargo.toml index b3837f9da..44217d305 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -7,7 +7,6 @@ edition = "2021" [dependencies] anyhow = "1.0" diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"] } -dotenvy = "0.15" fdo-data-formats = { path = "../data-formats", version = "0.4.12" } diff --git a/db/src/postgres.rs b/db/src/postgres.rs index 169705411..53217822f 100644 --- a/db/src/postgres.rs +++ b/db/src/postgres.rs @@ -13,7 +13,6 @@ use diesel::PgConnection; use std::env; use anyhow::Result; -use dotenvy::dotenv; use super::models::{ManufacturerOV, NewOwnerOV, NewRendezvousOV, OwnerOV, 
RendezvousOV}; @@ -24,14 +23,12 @@ pub struct PostgresManufacturerDB {} impl DBStoreManufacturer for PostgresManufacturerDB { fn get_connection() -> PgConnection { - dotenv().ok(); let database_url = env::var("POSTGRES_MANUFACTURER_DATABASE_URL") .expect("POSTGRES_MANUFACTURER_DATABASE_URL must be set"); PgConnection::establish(&database_url).expect("Error connecting to database") } fn get_conn_pool() -> Pool> { - dotenv().ok(); let database_url = env::var("POSTGRES_MANUFACTURER_DATABASE_URL") .expect("POSTGRES_MANUFACTURER_DATABASE_URL must be set"); let manager = ConnectionManager::::new(database_url); @@ -94,14 +91,12 @@ pub struct PostgresOwnerDB {} impl DBStoreOwner for PostgresOwnerDB { fn get_connection() -> PgConnection { - dotenv().ok(); let database_url = env::var("POSTGRES_OWNER_DATABASE_URL") .expect("POSTGRES_OWNER_DATABASE_URL must be set"); PgConnection::establish(&database_url).expect("Error connecting to database") } fn get_conn_pool() -> Pool> { - dotenv().ok(); let database_url = env::var("POSTGRES_OWNER_DATABASE_URL") .expect("POSTGRES_OWNER_DATABASE_URL must be set"); let manager = ConnectionManager::::new(database_url); @@ -206,14 +201,12 @@ pub struct PostgresRendezvousDB {} impl DBStoreRendezvous for PostgresRendezvousDB { fn get_connection() -> PgConnection { - dotenv().ok(); let database_url = env::var("POSTGRES_RENDEZVOUS_DATABASE_URL") .expect("POSTGRES_RENDEZVOUS_DATABASE_URL must be set"); PgConnection::establish(&database_url).expect("Error connecting to database") } fn get_conn_pool() -> Pool> { - dotenv().ok(); let database_url = env::var("POSTGRES_RENDEZVOUS_DATABASE_URL") .expect("POSTGRES_RENDEZVOUS_DATABASE_URL must be set"); let manager = ConnectionManager::::new(database_url); diff --git a/db/src/sqlite.rs b/db/src/sqlite.rs index 10f1021ad..4058a6187 100644 --- a/db/src/sqlite.rs +++ b/db/src/sqlite.rs @@ -14,7 +14,6 @@ use crate::schema::rendezvous_vouchers; use std::env; use anyhow::Result; -use dotenvy::dotenv; use 
super::models::{NewOwnerOV, NewRendezvousOV, OwnerOV, RendezvousOV}; @@ -26,14 +25,12 @@ pub struct SqliteManufacturerDB {} impl DBStoreManufacturer for SqliteManufacturerDB { fn get_connection() -> SqliteConnection { - dotenv().ok(); let database_url = env::var("SQLITE_MANUFACTURER_DATABASE_URL") .expect("SQLITE_MANUFACTURER_DATABASE_URL must be set"); SqliteConnection::establish(&database_url).expect("Error connecting to database") } fn get_conn_pool() -> Pool> { - dotenv().ok(); let database_url = env::var("SQLITE_MANUFACTURER_DATABASE_URL") .expect("SQLITE_MANUFACTURER_DATABASE_URL must be set"); let manager = ConnectionManager::::new(database_url); @@ -96,14 +93,12 @@ pub struct SqliteOwnerDB {} impl DBStoreOwner for SqliteOwnerDB { fn get_connection() -> SqliteConnection { - dotenv().ok(); let database_url = env::var("SQLITE_OWNER_DATABASE_URL").expect("SQLITE_OWNER_DATABASE_URL must be set"); SqliteConnection::establish(&database_url).expect("Error connecting to database") } fn get_conn_pool() -> Pool> { - dotenv().ok(); let database_url = env::var("SQLITE_OWNER_DATABASE_URL").expect("SQLITE_OWNER_DATABASE_URL must be set"); let manager = ConnectionManager::::new(database_url); @@ -208,14 +203,12 @@ pub struct SqliteRendezvousDB {} impl DBStoreRendezvous for SqliteRendezvousDB { fn get_connection() -> SqliteConnection { - dotenv().ok(); let database_url = env::var("SQLITE_RENDEZVOUS_DATABASE_URL") .expect("SQLITE_RENDEZVOUS_DATABASE_URL must be set"); SqliteConnection::establish(&database_url).expect("Error connecting to database") } fn get_conn_pool() -> Pool> { - dotenv().ok(); let database_url = env::var("SQLITE_RENDEZVOUS_DATABASE_URL") .expect("SQLITE_RENDEZVOUS_DATABASE_URL must be set"); let manager = ConnectionManager::::new(database_url); From 1e430f5b1c697669de3e5da2d4b7d196a7f2763f Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Thu, 9 Nov 2023 17:06:50 +0100 Subject: [PATCH 14/25] feat(store): add query_ovs_db method to store trait The 
`query_data` method makes no sense when using databases since it uses different types of lists to query the required data, when that's a single query in a database; thereby we are adding the `query_ovs_db` method which explicitly queries the required data from the database. We are also adding a new error type `StoreError::MethodNotAvailable` that will be yielded when we call `query_data` from a database store implementation, or when we call the new `query_ovs_db` method from a directory store implementation. Signed-off-by: Irene Diez --- store/src/db.rs | 52 +++++++++++++++++++++++++++++++----------- store/src/directory.rs | 5 ++++ store/src/lib.rs | 13 ++++++++++- 3 files changed, 56 insertions(+), 14 deletions(-) diff --git a/store/src/db.rs b/store/src/db.rs index 288896ea0..1d224af38 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -1,4 +1,3 @@ -use anyhow::bail; use async_trait::async_trait; use fdo_data_formats::ownershipvoucher::OwnershipVoucher; use fdo_db::*; @@ -134,11 +133,11 @@ where // NOTE: this function is only used in the owner onboarding server // when we need to filter the OVs that haven't done the To2 and still // have ttl. It is not used in the manufacturing server. - // This is why we are returning dummy things to comply with the trait. 
- Ok(Box::new(SqliteManufacturerStoreFilterType { - neqs: Vec::new(), - lts: Vec::new(), - })) + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) } async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { @@ -296,7 +295,33 @@ where } async fn query_data(&self) -> crate::QueryResult { - todo!(); + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + let mut ret = vec![]; + let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); + let conn = &mut pool + .get() + .map_err(|e| StoreError::Unspecified(format!("Error connecting to DB {e:?}")))?; + let db_ovs = fdo_db::sqlite::SqliteOwnerDB::select_ov_to2_performed_and_ov_to0_less_than( + false, + time::OffsetDateTime::now_utc().unix_timestamp(), + conn, + ) + .map_err(|e| { + StoreError::Unspecified(format!( + "Error selecting OVs filtering by to2 and to0: {e:?}" + )) + })?; + for db_ov in db_ovs { + ret.push( + OwnershipVoucher::from_pem_or_raw(&db_ov.contents).map_err(|e| { + StoreError::Unspecified(format!("Error parsing OV contents from DB: {e:?}")) + })?, + ); + } + Ok(ret) } async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { @@ -325,7 +350,8 @@ where async fn perform_maintenance(&self) -> Result<(), StoreError> { // This is not used in the owner onboarding server since the OVs there - // do not have a ttl. + // do not have a ttl, but we still need to return Ok since the method + // will be called. Ok(()) } } @@ -424,11 +450,11 @@ where // NOTE: this function is only used in the owner onboarding server // when we need to filter the OVs that haven't done the To2 and still // have ttl. It is not used in the rendezvous server. - // This is why we are returning dummy things to comply with the trait. 
- Ok(Box::new(SqliteRendezvousStoreFilterType { - neqs: Vec::new(), - lts: Vec::new(), - })) + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) } async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { diff --git a/store/src/directory.rs b/store/src/directory.rs index 5ac5c8628..a7b31239d 100644 --- a/store/src/directory.rs +++ b/store/src/directory.rs @@ -1,3 +1,4 @@ +use fdo_data_formats::ownershipvoucher::OwnershipVoucher; use std::collections::HashSet; use std::convert::TryInto; use std::fs::{self, File}; @@ -297,6 +298,10 @@ where })) } + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + async fn store_data(&self, key: K, value: V) -> Result<(), StoreError> { let finalpath = self.get_path(&key); let mut path = finalpath.clone(); diff --git a/store/src/lib.rs b/store/src/lib.rs index 40df43516..39d02a5bc 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -4,7 +4,7 @@ use core::pin::Pin; use serde::{Deserialize, Serialize}; use thiserror::Error; -use fdo_data_formats::Serializable; +use fdo_data_formats::{ownershipvoucher::OwnershipVoucher, Serializable}; #[derive(Debug, Error)] pub enum StoreError { @@ -12,6 +12,8 @@ pub enum StoreError { Unspecified(String), #[error("Configuration error: {0}")] Configuration(String), + #[error("Method not available")] + MethodNotAvailable, } mod private { @@ -184,6 +186,15 @@ pub trait Store: Send + Sync { Self: 'async_trait, OT: Writable; + fn query_ovs_db<'life0, 'async_trait>( + &'life0 self, + ) -> Pin< + Box, StoreError>> + 'async_trait + Send>, + > + where + 'life0: 'async_trait, + Self: 'async_trait; + fn store_data<'life0, 'async_trait>( &'life0 self, key: K, From 6df89ca6ff4bb4d0c33a1a8d71138d7143d2e6df Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Thu, 9 Nov 2023 17:37:22 +0100 Subject: [PATCH 15/25] feat(owner-server): add DB handling to report_to_rendezvous 
When the Owner server needs to report OVs to the Rendezvous server, check whether we have the old `query_data` method available or the new `query_ovs_db` method to choose the most effective one. Signed-off-by: Irene Diez --- owner-onboarding-server/src/main.rs | 91 +++++++++++++++++++---------- 1 file changed, 59 insertions(+), 32 deletions(-) diff --git a/owner-onboarding-server/src/main.rs b/owner-onboarding-server/src/main.rs index 4ef0d826b..f1221aa78 100644 --- a/owner-onboarding-server/src/main.rs +++ b/owner-onboarding-server/src/main.rs @@ -27,7 +27,7 @@ use fdo_data_formats::{ publickey::PublicKey, types::{Guid, TO2AddressEntry}, }; -use fdo_store::Store; +use fdo_store::{Store, StoreError}; use fdo_util::servers::{ configuration::{owner_onboarding_server::OwnerOnboardingServerSettings, AbsolutePathBuf}, settings_for, OwnershipVoucherStoreMetadataKey, @@ -72,42 +72,69 @@ fn load_private_key(path: &AbsolutePathBuf) -> Result> { Ok(PKey::private_key_from_der(&contents)?) } -async fn report_to_rendezvous(udt: OwnerServiceUDT) -> Result<()> { - let mut ft = udt.ownership_voucher_store.query_data().await?; - ft.neq( - &fdo_store::MetadataKey::Local(OwnershipVoucherStoreMetadataKey::To2Performed), - &true, - ); - ft.lt( - &fdo_store::MetadataKey::Local(OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds), - time::OffsetDateTime::now_utc().unix_timestamp(), - ); +async fn _handle_report_to_rendezvous(udt: &OwnerServiceUDT, ov: &OwnershipVoucher) -> Result<()> { + match report_ov_to_rendezvous(ov, &udt.owner_addresses, &udt.owner_key).await { + Ok(wait_seconds) => { + udt.ownership_voucher_store + .store_metadata( + ov.header().guid(), + &fdo_store::MetadataKey::Local( + OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds, + ), + &time::Duration::new(wait_seconds.into(), 0), + ) + .await?; + Ok(()) + } + Err(e) => { + log::warn!( + "OV({}): failed to report to rendezvous: {}", + ov.header().guid().to_string(), + e + ); + Ok(()) + } + } +} - let 
ov_iter = ft.query().await?; - if let Some(ovs) = ov_iter { - for ov in ovs { - match report_ov_to_rendezvous(&ov, &udt.owner_addresses, &udt.owner_key).await { - Ok(wait_seconds) => { - udt.ownership_voucher_store - .store_metadata( - ov.header().guid(), - &fdo_store::MetadataKey::Local( - OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds, - ), - &time::Duration::new(wait_seconds.into(), 0), - ) - .await?; +async fn report_to_rendezvous(udt: OwnerServiceUDT) -> Result<()> { + match udt.ownership_voucher_store.query_data().await { + Ok(mut ft) => { + ft.neq( + &fdo_store::MetadataKey::Local(OwnershipVoucherStoreMetadataKey::To2Performed), + &true, + ); + ft.lt( + &fdo_store::MetadataKey::Local( + OwnershipVoucherStoreMetadataKey::To0AcceptOwnerWaitSeconds, + ), + time::OffsetDateTime::now_utc().unix_timestamp(), + ); + let ov_iter = ft.query().await?; + if let Some(ovs) = ov_iter { + for ov in ovs { + _handle_report_to_rendezvous(&udt, &ov).await?; + } + } + } + Err(StoreError::MethodNotAvailable) => { + match udt.ownership_voucher_store.query_ovs_db().await { + Ok(ovs) => { + for ov in ovs { + _handle_report_to_rendezvous(&udt, &ov).await? + } } + Err(StoreError::Unspecified(txt)) => { + log::warn!("DB error: {txt:?}") + } + Err(StoreError::MethodNotAvailable) => bail!("Unreachable"), Err(e) => { - log::warn!( - "OV({}): failed to report to rendezvous: {}", - ov.header().guid().to_string(), - e - ); + log::warn!("DB error: {e:?}") } - }; + } } - } + Err(e) => log::warn!("Error querying data: {e:?}"), + }; Ok(()) } From 1a9d3687b1748caf74ede99d0a2f7d637ec533f8 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Fri, 10 Nov 2023 12:40:04 +0100 Subject: [PATCH 16/25] feat(store): add Database error type Replace all the `StoreError::Unspecified` errors with the new `StoreError::Database` when there is an internal database error. 
Signed-off-by: Irene Diez --- store/src/db.rs | 38 +++++++++++++++++++------------------- store/src/lib.rs | 2 ++ 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/store/src/db.rs b/store/src/db.rs index 1d224af38..976393bb4 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -105,7 +105,7 @@ where .expect("Unable to convert"); fdo_db::sqlite::SqliteManufacturerDB::update_ov_ttl(&key.to_string(), Some(val), conn) .map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to update OV with guid {} with {val}: {e:?}", key.to_string() )) @@ -121,7 +121,7 @@ where let conn = &mut pool.get().expect("Couldn't establish a connection"); fdo_db::sqlite::SqliteManufacturerDB::update_ov_ttl(&key.to_string(), None, conn).map_err( |e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to set 'None' metadata on OV {}: {e:?}", key.to_string() )) @@ -146,7 +146,7 @@ where let raw = V::serialize_data(&value).expect("Error serializing data"); let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); fdo_db::sqlite::SqliteManufacturerDB::insert_ov(&ov, None, conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Error inserting OV with guid {}: {e:?}", ov.header().guid().to_string() )) @@ -157,7 +157,7 @@ where let pool = fdo_db::sqlite::SqliteManufacturerDB::get_conn_pool(); let conn = &mut pool.get().expect("Couldn't establish a connection"); fdo_db::sqlite::SqliteManufacturerDB::delete_ov(&key.to_string(), conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Error deleting OV with guid {}: {e:?}", key.to_string() )) @@ -169,7 +169,7 @@ where let conn = &mut pool.get().expect("Couldn't establish a connection"); let now = time::OffsetDateTime::now_utc().unix_timestamp(); fdo_db::sqlite::SqliteManufacturerDB::delete_ov_ttl_le(now, conn).map_err(|e| { - StoreError::Unspecified(format!("Error deleting OVs with ttl <= {now}: 
{e:?}")) + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) }) } } @@ -242,7 +242,7 @@ where .expect("Unable to convert string to bool"); fdo_db::sqlite::SqliteOwnerDB::update_ov_to2(&key.to_string(), Some(val), conn) .map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to update OV (guid {}) to2 with value {val}: {e:?}", &key.to_string() )) @@ -259,14 +259,14 @@ where conn, ) .map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to update OV (guid {}) to0 with value {val}: {e:?}", &key.to_string() )) }) } _ => Err(StoreError::Unspecified(format!( - "Unable to hanlde metadata key {}", + "Unable to handle metadata key {}", metadata_key.to_key() ))), } @@ -281,13 +281,13 @@ where let conn = &mut pool.get().expect("Couldn't establish a connection"); fdo_db::sqlite::SqliteOwnerDB::update_ov_to0_wait_seconds(&key.to_string(), None, conn) .map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to set 'None' to0 metadata on OV {}: {e:?}", key.to_string() )) })?; fdo_db::sqlite::SqliteOwnerDB::update_ov_to2(&key.to_string(), None, conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to set 'None' to2 metadata on OV {}: {e:?}", key.to_string() )) @@ -303,14 +303,14 @@ where let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); let conn = &mut pool .get() - .map_err(|e| StoreError::Unspecified(format!("Error connecting to DB {e:?}")))?; + .map_err(|e| StoreError::Database(format!("Error connecting to DB {e:?}")))?; let db_ovs = fdo_db::sqlite::SqliteOwnerDB::select_ov_to2_performed_and_ov_to0_less_than( false, time::OffsetDateTime::now_utc().unix_timestamp(), conn, ) .map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Error selecting OVs filtering by to2 and to0: {e:?}" )) })?; @@ -330,7 +330,7 @@ where let raw = V::serialize_data(&value).expect("Error serializing 
data"); let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); fdo_db::sqlite::SqliteOwnerDB::insert_ov(&ov, None, None, conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Error inserting OV with guid {}: {e:?}", ov.header().guid().to_string() )) @@ -341,7 +341,7 @@ where let pool = fdo_db::sqlite::SqliteOwnerDB::get_conn_pool(); let conn = &mut pool.get().expect("Couldn't establish a connection"); fdo_db::sqlite::SqliteOwnerDB::delete_ov(&key.to_string(), conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Error deleting OV with guid {}: {e:?}", &key.to_string() )) @@ -422,7 +422,7 @@ where .expect("Unable to convert"); fdo_db::sqlite::SqliteRendezvousDB::update_ov_ttl(&key.to_string(), Some(val), conn) .map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to update OV with guid {} with {val}: {e:?}", key.to_string() )) @@ -438,7 +438,7 @@ where let conn = &mut pool.get().expect("Couldn't establish a connection"); fdo_db::sqlite::SqliteRendezvousDB::update_ov_ttl(&key.to_string(), None, conn).map_err( |e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Unable to set 'None' ttl on OV {}: {e:?}", key.to_string() )) @@ -463,7 +463,7 @@ where let raw = V::serialize_data(&value).expect("Error serializing data"); let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); fdo_db::sqlite::SqliteRendezvousDB::insert_ov(&ov, None, conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( "Error inserting OV with guid {}: {e:?}", ov.header().guid().to_string() )) @@ -474,7 +474,7 @@ where let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); let conn = &mut pool.get().expect("Couldn't establish a connection"); fdo_db::sqlite::SqliteRendezvousDB::delete_ov(&key.to_string(), conn).map_err(|e| { - StoreError::Unspecified(format!( + StoreError::Database(format!( 
"Error deleting OV with guid {}: {e:?}", key.to_string() )) @@ -486,7 +486,7 @@ where let conn = &mut pool.get().expect("Couldn't establish a connection"); let now = time::OffsetDateTime::now_utc().unix_timestamp(); fdo_db::sqlite::SqliteRendezvousDB::delete_ov_ttl_le(now, conn).map_err(|e| { - StoreError::Unspecified(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) }) } } diff --git a/store/src/lib.rs b/store/src/lib.rs index 39d02a5bc..ed272fbb0 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -14,6 +14,8 @@ pub enum StoreError { Configuration(String), #[error("Method not available")] MethodNotAvailable, + #[error("Internal database error: {0}")] + Database(String), } mod private { From e219eeac644a00e86a2f2abe1b3dc35b7801f555 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Mon, 13 Nov 2023 17:33:25 +0100 Subject: [PATCH 17/25] fix(db): change select_ov_to2_performed_and_ov_to0_less_than query This changes the filters so that they correctly take into account NULL values. In order to set a NULL value we need to use None in rust, but for filtering instead of using None we need to filter by `is_null()`. 
Signed-off-by: Irene Diez --- db/src/postgres.rs | 12 ++++++++++-- db/src/sqlite.rs | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/db/src/postgres.rs b/db/src/postgres.rs index 53217822f..e8f171291 100644 --- a/db/src/postgres.rs +++ b/db/src/postgres.rs @@ -165,8 +165,16 @@ impl DBStoreOwner for PostgresOwnerDB { conn: &mut PgConnection, ) -> Result> { let result = super::schema::owner_vouchers::dsl::owner_vouchers - .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) - .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .filter( + super::schema::owner_vouchers::to0_accept_owner_wait_seconds + .lt(to0_max) + .or(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.is_null()), + ) + .filter( + super::schema::owner_vouchers::to2_performed + .eq(to2_performed) + .or(super::schema::owner_vouchers::to2_performed.is_null()), + ) .select(OwnerOV::as_select()) .load(conn)?; Ok(result) diff --git a/db/src/sqlite.rs b/db/src/sqlite.rs index 4058a6187..78997e0e3 100644 --- a/db/src/sqlite.rs +++ b/db/src/sqlite.rs @@ -167,8 +167,16 @@ impl DBStoreOwner for SqliteOwnerDB { conn: &mut SqliteConnection, ) -> Result> { let result = super::schema::owner_vouchers::dsl::owner_vouchers - .filter(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.lt(to0_max)) - .filter(super::schema::owner_vouchers::to2_performed.eq(to2_performed)) + .filter( + super::schema::owner_vouchers::to0_accept_owner_wait_seconds + .lt(to0_max) + .or(super::schema::owner_vouchers::to0_accept_owner_wait_seconds.is_null()), + ) + .filter( + super::schema::owner_vouchers::to2_performed + .eq(to2_performed) + .or(super::schema::owner_vouchers::to2_performed.is_null()), + ) .select(OwnerOV::as_select()) .load(conn)?; Ok(result) From 4c9598a6634dac35b75368b04117e7ee86b88202 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Tue, 14 Nov 2023 11:33:11 +0100 Subject: [PATCH 18/25] fix(store): expect StoredItem in 
rendezvous store This changes the `store_data` method so that it expects a Value of type `StoredItem` for the serialization and deserialization of data. Signed-off-by: Irene Diez --- store/src/db.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/store/src/db.rs b/store/src/db.rs index 976393bb4..07af4cfb2 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -1,5 +1,6 @@ use async_trait::async_trait; use fdo_data_formats::ownershipvoucher::OwnershipVoucher; +use fdo_data_formats::StoredItem; use fdo_db::*; use std::marker::PhantomData; @@ -457,17 +458,18 @@ where Err(StoreError::MethodNotAvailable) } - async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + async fn store_data(&self, key: K, value: V) -> Result<(), StoreError> { let pool = fdo_db::sqlite::SqliteRendezvousDB::get_conn_pool(); let conn = &mut pool.get().expect("Couldn't establish a connection"); let raw = V::serialize_data(&value).expect("Error serializing data"); - let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); - fdo_db::sqlite::SqliteRendezvousDB::insert_ov(&ov, None, conn).map_err(|e| { - StoreError::Database(format!( - "Error inserting OV with guid {}: {e:?}", - ov.header().guid().to_string() - )) - }) + let stored = StoredItem::deserialize_data(&raw).expect("Error converting StoredItem"); + fdo_db::sqlite::SqliteRendezvousDB::insert_ov(&stored, &key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Error inserting StoredItem with guid {}: {e:?}", + &key.to_string() + )) + }) } async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { From 8af33813d55f5a2484b6ee14bc4c8d91ab3544c0 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Wed, 15 Nov 2023 17:28:25 +0100 Subject: [PATCH 19/25] chore(containers): update DB dependencies Signed-off-by: Irene Diez --- contrib/containers/admin-cli | 1 + contrib/containers/build | 2 +- contrib/containers/manufacturing-server | 1 + 
contrib/containers/owner-onboarding-server | 1 + contrib/containers/rendezvous-server | 1 + contrib/containers/serviceinfo-api-server | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/contrib/containers/admin-cli b/contrib/containers/admin-cli index 17bf9939b..038be2f05 100644 --- a/contrib/containers/admin-cli +++ b/contrib/containers/admin-cli @@ -1,4 +1,5 @@ FROM quay.io/centos/centos:stream9 ARG BUILDID COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-admin-tool /usr/local/bin +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENTRYPOINT ["fdo-admin-tool"] diff --git a/contrib/containers/build b/contrib/containers/build index f725dc81e..5f3dc9945 100644 --- a/contrib/containers/build +++ b/contrib/containers/build @@ -1,6 +1,6 @@ FROM quay.io/centos/centos:stream9 RUN yum update -y -RUN yum install -y --enablerepo=crb cargo gcc golang openssl-devel tpm2-tss-devel cryptsetup-devel clang-devel +RUN yum install -y --enablerepo=crb cargo gcc golang openssl-devel tpm2-tss-devel cryptsetup-devel clang-devel sqlite sqlite-devel libpq libpq-devel WORKDIR /usr/src COPY . . 
RUN cargo build --release --features openssl-kdf/deny_custom diff --git a/contrib/containers/manufacturing-server b/contrib/containers/manufacturing-server index 7c6cd63d6..c20d43648 100644 --- a/contrib/containers/manufacturing-server +++ b/contrib/containers/manufacturing-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-manufacturing-serve RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/keys RUN mkdir -p /etc/fdo/manufacturing-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-manufacturing-server"] diff --git a/contrib/containers/owner-onboarding-server b/contrib/containers/owner-onboarding-server index f3df20ee1..eaaf9a621 100644 --- a/contrib/containers/owner-onboarding-server +++ b/contrib/containers/owner-onboarding-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-owner-onboarding-se RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/keys RUN mkdir -p /etc/fdo/owner-onboarding-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-owner-onboarding-server"] diff --git a/contrib/containers/rendezvous-server b/contrib/containers/rendezvous-server index 8319d12fc..b2e0bc33e 100644 --- a/contrib/containers/rendezvous-server +++ b/contrib/containers/rendezvous-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-rendezvous-server / RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/keys RUN mkdir -p /etc/fdo/rendezvous-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-rendezvous-server"] diff --git a/contrib/containers/serviceinfo-api-server b/contrib/containers/serviceinfo-api-server index bb1e844c7..ecd90cf11 100644 --- a/contrib/containers/serviceinfo-api-server +++ b/contrib/containers/serviceinfo-api-server @@ -4,5 +4,6 @@ COPY --from=fdo-build:${BUILDID} 
/usr/src/target/release/fdo-serviceinfo-api-ser RUN mkdir -p /etc/fdo/sessions RUN mkdir -p /etc/fdo/device_specific_serviceinfo RUN mkdir -p /etc/fdo/serviceinfo-api-server.conf.d +RUN yum install -y sqlite sqlite-devel libpq libpq-devel ENV LOG_LEVEL=trace ENTRYPOINT ["fdo-serviceinfo-api-server"] From 55d85d39e7fcccdfa77b6ecbe773b1cac374556e Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Mon, 20 Nov 2023 17:35:00 +0100 Subject: [PATCH 20/25] feat(store): add postgresql store variant Signed-off-by: Irene Diez --- store/src/db.rs | 484 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 476 insertions(+), 8 deletions(-) diff --git a/store/src/db.rs b/store/src/db.rs index 07af4cfb2..552588455 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -11,13 +11,6 @@ use crate::StoreError; use crate::{FilterType, MetadataLocalKey, MetadataValue, ValueIter}; use fdo_data_formats::Serializable; -struct SqliteManufacturerStore { - phantom_k: PhantomData, - phantom_v: PhantomData, -} - -impl SqliteManufacturerStore where K: std::string::ToString {} - pub(super) fn initialize( db_type: DBType, server_type: &ServerType, @@ -29,7 +22,20 @@ where MKT: crate::MetadataLocalKey + 'static, { match db_type { - DBType::Postgres => todo!(), + DBType::Postgres => match server_type { + ServerType::Manufacturer => Ok(Box::new(PostgresManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Owner => Ok(Box::new(PostgresOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + ServerType::Rendezvous => Ok(Box::new(PostgresRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, + })), + }, DBType::Sqlite => match server_type { ServerType::Manufacturer => Ok(Box::new(SqliteManufacturerStore { phantom_k: PhantomData, @@ -47,6 +53,13 @@ where } } +struct SqliteManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl SqliteManufacturerStore where K: std::string::ToString {} + pub struct 
SqliteManufacturerStoreFilterType { neqs: Vec, lts: Vec, @@ -492,3 +505,458 @@ where }) } } + +struct PostgresManufacturerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl PostgresManufacturerStore where K: std::string::ToString {} + +pub struct PostgresManufacturerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for PostgresManufacturerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for PostgresManufacturerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::postgres::PostgresManufacturerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + 
fdo_db::postgres::PostgresManufacturerDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresManufacturerDB::update_ov_ttl(&key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to set 'None' metadata on OV {}: {e:?}", + key.to_string() + )) + }) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the manufacturing server. + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::postgres::PostgresManufacturerDB::insert_ov(&ov, None, conn).map_err(|e| { + StoreError::Database(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresManufacturerDB::delete_ov(&key.to_string(), conn).map_err(|e| { 
+ StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresManufacturerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::postgres::PostgresManufacturerDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} + +struct PostgresOwnerStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl PostgresOwnerStore where K: std::string::ToString {} + +pub struct PostgresOwnerStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for PostgresOwnerStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for PostgresOwnerStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::postgres::PostgresOwnerDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + 
async fn store_metadata( + &self, + key: &K, + metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + match metadata_key.to_key() { + "fdo.to2_performed" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to bool"); + fdo_db::postgres::PostgresOwnerDB::update_ov_to2(&key.to_string(), Some(val), conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV (guid {}) to2 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + "fdo.to0_accept_owner_wait_seconds" => { + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert string to i64"); + fdo_db::postgres::PostgresOwnerDB::update_ov_to0_wait_seconds( + &key.to_string(), + Some(val), + conn, + ) + .map_err(|e| { + StoreError::Database(format!( + "Unable to update OV (guid {}) to0 with value {val}: {e:?}", + &key.to_string() + )) + }) + } + _ => Err(StoreError::Unspecified(format!( + "Unable to handle metadata key {}", + metadata_key.to_key() + ))), + } + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresOwnerDB::update_ov_to0_wait_seconds(&key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Unable to set 'None' to0 metadata on OV {}: {e:?}", + key.to_string() + )) + })?; + fdo_db::postgres::PostgresOwnerDB::update_ov_to2(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Database(format!( + "Unable to set 'None' to2 metadata on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + Err(StoreError::MethodNotAvailable) + 
} + + async fn query_ovs_db(&self) -> Result, StoreError> { + let mut ret = vec![]; + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool + .get() + .map_err(|e| StoreError::Database(format!("Error connecting to DB {e:?}")))?; + let db_ovs = + fdo_db::postgres::PostgresOwnerDB::select_ov_to2_performed_and_ov_to0_less_than( + false, + time::OffsetDateTime::now_utc().unix_timestamp(), + conn, + ) + .map_err(|e| { + StoreError::Database(format!( + "Error selecting OVs filtering by to2 and to0: {e:?}" + )) + })?; + for db_ov in db_ovs { + ret.push( + OwnershipVoucher::from_pem_or_raw(&db_ov.contents).map_err(|e| { + StoreError::Unspecified(format!("Error parsing OV contents from DB: {e:?}")) + })?, + ); + } + Ok(ret) + } + + async fn store_data(&self, _key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let ov = OwnershipVoucher::from_pem_or_raw(&raw).expect("Error converting OV"); + fdo_db::postgres::PostgresOwnerDB::insert_ov(&ov, None, None, conn).map_err(|e| { + StoreError::Database(format!( + "Error inserting OV with guid {}: {e:?}", + ov.header().guid().to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresOwnerDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresOwnerDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + // This is not used in the owner onboarding server since the OVs there + // do not have a ttl, but we still need to return Ok since the method + // will be called. 
+ Ok(()) + } +} + +struct PostgresRendezvousStore { + phantom_k: PhantomData, + phantom_v: PhantomData, +} + +impl PostgresRendezvousStore where K: std::string::ToString {} + +pub struct PostgresRendezvousStoreFilterType { + neqs: Vec, + lts: Vec, +} + +#[async_trait] +impl FilterType for PostgresRendezvousStoreFilterType +where + V: Serializable + Send + Sync + Clone + 'static, + MKT: MetadataLocalKey, +{ + fn neq(&mut self, _key: &crate::MetadataKey, _expected: &dyn MetadataValue) { + self.neqs = Vec::new(); + } + fn lt(&mut self, _key: &crate::MetadataKey, _max: i64) { + self.lts = Vec::new(); + } + async fn query(&self) -> Result, StoreError> { + let values = Vec::new(); + Ok(Some(ValueIter { + index: 0, + values, + errored: false, + })) + } +} + +#[async_trait] +impl Store for PostgresRendezvousStore +where + OT: crate::StoreOpenMode, + K: std::str::FromStr + std::string::ToString + Send + Sync + 'static, + V: Serializable + Send + Sync + Clone + 'static, + MKT: crate::MetadataLocalKey + 'static, +{ + async fn load_data(&self, key: &K) -> Result, StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let ov_db = fdo_db::postgres::PostgresRendezvousDB::get_ov(&key.to_string(), conn) + .expect("Error selecting OV"); + Ok(Some(V::deserialize_data(&ov_db.contents).map_err(|e| { + StoreError::Unspecified(format!("Error deserializing value: {e:?}")) + })?)) + } + + async fn store_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + metadata_value: &dyn MetadataValue, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let val = metadata_value + .to_text() + .parse::() + .expect("Unable to convert"); + fdo_db::postgres::PostgresRendezvousDB::update_ov_ttl(&key.to_string(), Some(val), conn) + .map_err(|e| { + 
StoreError::Database(format!( + "Unable to update OV with guid {} with {val}: {e:?}", + key.to_string() + )) + }) + } + + async fn destroy_metadata( + &self, + key: &K, + _metadata_key: &crate::MetadataKey, + ) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresRendezvousDB::update_ov_ttl(&key.to_string(), None, conn).map_err( + |e| { + StoreError::Database(format!( + "Unable to set 'None' ttl on OV {}: {e:?}", + key.to_string() + )) + }, + ) + } + + async fn query_data(&self) -> crate::QueryResult { + // NOTE: this function is only used in the owner onboarding server + // when we need to filter the OVs that haven't done the To2 and still + // have ttl. It is not used in the rendezvous server. + Err(StoreError::MethodNotAvailable) + } + + async fn query_ovs_db(&self) -> Result, StoreError> { + Err(StoreError::MethodNotAvailable) + } + + async fn store_data(&self, key: K, value: V) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let raw = V::serialize_data(&value).expect("Error serializing data"); + let stored = StoredItem::deserialize_data(&raw).expect("Error converting StoredItem"); + fdo_db::postgres::PostgresRendezvousDB::insert_ov(&stored, &key.to_string(), None, conn) + .map_err(|e| { + StoreError::Database(format!( + "Error inserting StoredItem with guid {}: {e:?}", + &key.to_string() + )) + }) + } + + async fn destroy_data(&self, key: &K) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + fdo_db::postgres::PostgresRendezvousDB::delete_ov(&key.to_string(), conn).map_err(|e| { + StoreError::Database(format!( + "Error deleting OV with guid {}: {e:?}", + key.to_string() + )) + 
}) + } + + async fn perform_maintenance(&self) -> Result<(), StoreError> { + let pool = fdo_db::postgres::PostgresRendezvousDB::get_conn_pool(); + let conn = &mut pool.get().expect("Couldn't establish a connection"); + let now = time::OffsetDateTime::now_utc().unix_timestamp(); + fdo_db::postgres::PostgresRendezvousDB::delete_ov_ttl_le(now, conn).map_err(|e| { + StoreError::Database(format!("Error deleting OVs with ttl <= {now}: {e:?}")) + }) + } +} From 48d8abd2b0335cee4659ff5fccbdd3d9c202ab1a Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Wed, 22 Nov 2023 17:39:39 +0100 Subject: [PATCH 21/25] docs: add database usage and configuration Signed-off-by: Irene Diez --- HOWTO.md | 212 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 207 insertions(+), 5 deletions(-) diff --git a/HOWTO.md b/HOWTO.md index c1dc23b10..890765f41 100644 --- a/HOWTO.md +++ b/HOWTO.md @@ -8,12 +8,15 @@ - How to get information about an OV - How to extend an OV with the Owner's Certificate - How to convert a PEM (plain-text) format OV to a COSE (binary) format OV + - How to export OVs from the Manufacturer Server (Database specific) + - How to import OVs into the Owner Onboarding Server (Database specific) - Configuration Files - `manufacturing-server.yml` - `rendezvous_info` field and `rendezvous-info.yml` - `owner-onboarding-server.yml` - `rendezvous-server.yml` - `serviceinfo-api-server.yml` +- Database management - How to run the servers: - Manufacturing Server - Owner Onboarding Server @@ -212,6 +215,61 @@ Use `fdo-owner-tool dump-ownership-voucher`: fdo-owner-tool dump-ownership-voucher your_ownership_voucher --outform cose > your_ownership_voucher.cose ``` +### How to export OVs from the Manufacturer Server (Database specific) + +Use `fdo-owner-tool export-manufacturer-vouchers`: + +``` +$ fdo-owner-tool export-manufacturer-vouchers --help +Exports a single or all the ownership vouchers present in the Manufacturer DB + +Usage: fdo-owner-tool 
export-manufacturer-vouchers [GUID] + +Arguments: + Type of the Manufacturer DB holding the OVs [possible values: sqlite, postgres] + DB connection URL, or path to the DB file + Path to dir where the OVs will be exported + [GUID] GUID of the voucher to be exported, if no GUID is given all the OVs will be exported +``` + +For example: + +```bash +fdo-owner-tool export-manufacturer-vouchers postgres \ +postgresql://test:test@localhost/test_manufacturer \ +/path/to/manufacturer-exports/ +``` + +### How to import OVs into the Owner Onboarding Server (Database specific) + +``` +$ fdo-owner-tool import-ownership-vouchers --help +Imports into the Owner DB a single ownership voucher or all the ownership vouchers present at a given path + +Usage: fdo-owner-tool import-ownership-vouchers + +Arguments: + Type of the Owner DB to import the OVs [possible values: sqlite, postgres] + DB connection URL or path to DB file + Path to the OV to be imported, or path to a directory where all the OVs to be imported are located + +Options: + -h, --help Print help +``` + +When importing OVs the tool will attempt to import each OV once, ignoring all +possible errors and then giving a summary of which OVs couldn't be imported. + +For example: + +``` +fdo-owner-tool import-ownership-vouchers postgres postgresql://test:test@localhost/test_owner /path/to/ovs/to/import/ +Unable to import all OVs. OV import operations yielded the following error/s: + +- Error Some(duplicate key value violates unique constraint "owner_vouchers_pkey") inserting OV d5bc48f8-b603-a1c0-e8b9-ae4d9bdf1570 from path "/path/to/ovs/to/import/d5bc48f8-b603-a1c0-e8b9-ae4d9bdf1570" +- Error Empty data serializing OV contents at path "/path/to/ovs/to/import/this-is-not-an-OV" +``` + ## Configuration Files This project uses @@ -274,7 +332,33 @@ Where: - `session_store_driver`: path to a directory that will hold session information. -- `ownership_voucher_store_driver`: path to a directory that will hold OVs. 
+- `ownership_voucher_store_driver`: this selects the ownership voucher storage + method. Select between `Directory`, `Sqlite` or `Postgres`. + - `Directory`: expects a `path` to the directory that will hold the OVs. + For example: + ``` + ownership_voucher_store_driver: + Directory: + path: /home/fedora/ownership_vouchers + ``` + - `Sqlite`: will use a Sqlite database to store the ownership vouchers. + When using this option you must set `Manufacturer` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Sqlite: + Manufacturer + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. + - `Postgres`: will use a Postgres database to store the ownership vouchers. + When using this option you must set `Manufacturer` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Postgres: + Manufacturer + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. - `public_key_store_driver:` [OPTIONAL] path to a directory that will hold the Manufacturer's public keys. - `bind`: IP address and port that this server will take. @@ -376,8 +460,33 @@ service_info_api_authentication: None Where: -- `ownership_voucher_store_driver`: path to a directory that will hold the OVs - owned by this server. +- `ownership_voucher_store_driver`: this selects the ownership voucher storage + method. Select between `Directory`, `Sqlite` or `Postgres`. + - `Directory`: expects a `path` to the directory that will hold the OVs. + For example: + ``` + ownership_voucher_store_driver: + Directory: + path: /home/fedora/ownership_vouchers + ``` + - `Sqlite`: will use a Sqlite database to store the ownership vouchers. + When using this option you must set `Owner` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Sqlite: + Owner + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. 
+ - `Postgres`: will use a Postgres database to store the ownership vouchers. + When using this option you must set `Owner` as the DB type as + shown below: + ``` + ownership_voucher_store_driver: + Postgres: + Owner + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. - `session_store_driver`: path to a directory that will hold session information. - `trusted_device_keys_path`: path to the Device Certificate Authority @@ -421,8 +530,34 @@ bind: "0.0.0.0:8082" Where: -- `storage_driver`: path to a directory that will hold OVs registered with the - Rendezvous Server. +- `storage_driver`: this selects the server's storage method. Select between + `Directory`, `Sqlite` or `Postgres`. + - `Directory`: expects a `path` to the directory that will serve as the + server's storage. + For example: + ``` + storage_driver: + Directory: + path: /home/fedora/rendezvous_storage + ``` + - `Sqlite`: will use a Sqlite database as the server's storage. + When using this option you must set `Rendezvous` as the DB type as + shown below: + ``` + storage_driver: + Sqlite: + Rendezvous + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. + - `Postgres`: will use a Postgres database as the server's storage. + When using this option you must set `Rendezvous` as the DB type as + shown below: + ``` + storage_driver: + Postgres: + Rendezvous + ``` + Please refer to the [Database management section](#database-management) on how to initialize databases. - `session_store_driver`: path to a directory that will hold session information. - `trusted_manufacturer_keys_path`: path to the Manufacturer Certificate. @@ -515,6 +650,58 @@ Where: rebooted after onboarding has completed, boolean (default false). 
- `additional_service_info`: [OPTIONAL] +## Database management + +When using the `Sqlite` or `Postgres` storage driver configuration you are able +to use Sqlite or Postgres databases to serve as the storage driver of the +Manufacturing, Owner and/or Rendezvous servers. + +You are able to use different database systems for each server (e.g. Sqlite for +the Manufacturing server and Postgres for the rest), or even mix +database storage in some servers with filesystem storage in other servers +(e.g. filesystem storage for the Manufacturing server and Postgres for the +rest). + +### Dependencies + +Install the following packages: + +```bash +dnf install -y sqlite sqlite-devel libpq libpq-devel +``` + +and the `diesel` tool for schema management: + +```bash +cargo install --force diesel_cli --no-default-features --features "postgres sqlite" +``` + +### Creating the databases + +When using databases you need to initialize the database based on the FDO +server and database type that you'll be using. + +All the databases are initialized by running + +```bash +diesel migration run --migration-dir $MIGRATION_DIRECTORY \
--database-url $DATABASE_URL +``` + +where `$MIGRATION_DIRECTORY` is one of the `migrations_*` directories that +matches your server type and database type combo +(`migrations_manufacturing_server_postgres`, +`migrations_manufacturing_server_sqlite`, +`migrations_owner_onboarding_server_postgres`, +`migrations_owner_onboarding_server_sqlite`, +`migrations_rendezvous_server_postgres`, +`migrations_rendezvous_server_sqlite`); the `$DATABASE_URL` is the Postgres +connection URL or a path to the location where the Sqlite database will be +located, depending on whether you'll be using Postgres or Sqlite, respectively. + +> **NOTE:** if you are using Fedora IoT along with the Sqlite DB, you must +> create the DB in a writable location, for instance `/var/lib/fdo`. + ## How to run the servers Please mind how the configuration file must be specifically named (e.g. 
`-` VS @@ -541,6 +728,11 @@ Please mind how the configuration file must be specifically named (e.g. `-` VS file in [examples/systemd](https://github.com/fedora-iot/fido-device-onboard-rs/blob/main/examples/systemd/fdo-manufacturing-server.service). + If you are using a Sqlite or Postgres database for storage, before running + the server you must set the `SQLITE_MANUFACTURER_DATABASE_URL` or + `POSTGRES_MANUFACTURER_DATABASE_URL` environment variable with the proper + connection URL when using Sqlite or Postgres, respectively. + ### Owner Onboarding Server 1. Generate the required keys/certificates for the Owner, see [How to generate @@ -570,6 +762,11 @@ Please mind how the configuration file must be specifically named (e.g. `-` VS 4. Execute `fdo-owner-onboarding-server` or run it as a service, see sample file in [examples/systemd](https://github.com/fedora-iot/fido-device-onboard-rs/blob/main/examples/systemd/fdo-owner-onboarding-server.service). + If you are using a Sqlite or Postgres database for storage, before running + the server you must set the `SQLITE_OWNER_DATABASE_URL` or + `POSTGRES_OWNER_DATABASE_URL` environment variable with the proper + connection URL when using Sqlite or Postgres, respectively. + ### Rendezvous Server 1. Configure `rendezvous-server.yml`, see [Configuration @@ -583,6 +780,11 @@ Please mind how the configuration file must be specifically named (e.g. `-` VS 2. Execute `fdo-rendezvous-server` or run it as a service, see sample file in [examples/systemd](https://github.com/fedora-iot/fido-device-onboard-rs/blob/main/examples/systemd/fdo-rendezvous-server.service). + If you are using a Sqlite or Postgres database for storage, before running + the server you must set the `SQLITE_RENDEZVOUS_DATABASE_URL` or + `POSTGRES_RENDEZVOUS_DATABASE_URL` environment variable with the proper + connection URL when using Sqlite or Postgres, respectively. + ### Service Info API Server 1. 
Configure `serviceinfo-api-server.yml`, see [Configuration From 3fcef6bdf9874ca8ea7ae746ebcb9dc986b33ebb Mon Sep 17 00:00:00 2001 From: Xiaofeng Wang Date: Wed, 3 Jan 2024 15:06:09 +0800 Subject: [PATCH 22/25] test: add postgres db test --- .github/workflows/ci.yml | 9 ++ test/fdo-postgres.sh | 196 +++++++++++++++++++++++++++++++++++++++ test/files/clients | 5 + 3 files changed, 210 insertions(+) create mode 100755 test/fdo-postgres.sh create mode 100644 test/files/clients diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8d8cab5c7..cc13b0bb0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -139,6 +139,15 @@ jobs: run: | git diff --exit-code + postgres_test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Run test + run: test/fdo-postgres.sh + commitlint: runs-on: ubuntu-latest steps: diff --git a/test/fdo-postgres.sh b/test/fdo-postgres.sh new file mode 100755 index 000000000..2a2950bed --- /dev/null +++ b/test/fdo-postgres.sh @@ -0,0 +1,196 @@ +#!/bin/bash +set -euox pipefail + +# Colorful output. +function greenprint { + echo -e "\033[1;32m${1}\033[0m" +} + +POSTGRES_IP=192.168.200.2 +FDO_MANUFACTURING_ADDRESS=192.168.200.50 +FDO_OWNER_ONBOARDING_ADDRESS=192.168.200.51 +FDO_RENDEZVOUS_ADDRESS=192.168.200.52 + +POSTGRES_USERNAME=postgres +POSTGRES_PASSWORD=foobar +POSTGRES_DB=postgres + +# Prepare stage repo network +greenprint "🔧 Prepare stage repo network" +sudo podman network inspect edge >/dev/null 2>&1 || sudo podman network create --driver=bridge --subnet=192.168.200.0/24 --gateway=192.168.200.254 edge + +# Build FDO and clients container image +greenprint "🔧 Build FDO and clients container image" +sudo buildah build -f contrib/containers/build -t fdo-build:latest . +sudo buildah build -f contrib/containers/manufacturing-server --build-arg BUILDID=latest -t manufacturing-server:latest . 
+sudo buildah build -f contrib/containers/rendezvous-server --build-arg BUILDID=latest -t rendezvous-server:latest . +sudo buildah build -f contrib/containers/owner-onboarding-server --build-arg BUILDID=latest -t owner-onboarding-server:latest . +sudo buildah build -f contrib/containers/aio --build-arg BUILDID=latest -t aio:latest . +sudo buildah build -f test/files/clients --build-arg BUILDID=latest -t clients:latest . +sudo buildah images + +########################################################## +## +## Prepare FDO containers +## +########################################################## +greenprint "🔧 Generate FDO key and configuration files" +sudo mkdir aio +sudo podman run --rm \ + -v "$PWD"/aio/:/aio:z \ + "localhost/aio:latest" \ + aio --directory aio generate-configs-and-keys --contact-hostname "$FDO_MANUFACTURING_ADDRESS" + +# Prepare FDO config files +greenprint "🔧 Prepare FDO key and configuration files for FDO containers" +sudo cp -r aio/keys test/fdo/ +sudo rm -rf aio + +# Set servers store driver to postgres +greenprint "🔧 Set servers store driver to postgres" +sudo pip3 install yq +# Configure manufacturing server db +yq -yi 'del(.ownership_voucher_store_driver.Directory)' test/fdo/manufacturing-server.yml +yq -yi '.ownership_voucher_store_driver += {"Postgres": "Manufacturer"}' test/fdo/manufacturing-server.yml +# Configure owner onboarding server db +yq -yi 'del(.ownership_voucher_store_driver.Directory)' test/fdo/owner-onboarding-server.yml +yq -yi '.ownership_voucher_store_driver += {"Postgres": "Owner"}' test/fdo/owner-onboarding-server.yml +# Configure rendezvous server db +yq -yi 'del(.storage_driver.Directory)' test/fdo/rendezvous-server.yml +yq -yi '.storage_driver += {"Postgres": "Rendezvous"}' test/fdo/rendezvous-server.yml + +# Prepare postgres db init sql script +greenprint "🔧 Prepare postgres db init sql script" +mkdir -p initdb +cp migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql initdb/manufacturing.sql 
+cp migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql initdb/owner-onboarding.sql +cp migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql initdb/rendezvous.sql + +greenprint "🔧 Starting postgres" +sudo podman run -d \ + --ip "$POSTGRES_IP" \ + --name postgres \ + --network edge \ + -e POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \ + -v "$PWD"/initdb/:/docker-entrypoint-initdb.d/:z \ + "quay.io/xiaofwan/postgres" + +greenprint "🔧 Starting fdo manufacture server" +sudo podman run -d \ + --ip "$FDO_MANUFACTURING_ADDRESS" \ + --name manufacture-server \ + --network edge \ + -v "$PWD"/test/fdo/:/etc/fdo/:z \ + -p 8080:8080 \ + -e POSTGRES_MANUFACTURER_DATABASE_URL="postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" \ + "localhost/manufacturing-server:latest" + +greenprint "🔧 Starting fdo owner onboarding server" +sudo podman run -d \ + --ip "$FDO_OWNER_ONBOARDING_ADDRESS" \ + --name owner-onboarding-server \ + --network edge \ + -v "$PWD"/test/fdo/:/etc/fdo/:z \ + -p 8081:8081 \ + -e POSTGRES_OWNER_DATABASE_URL="postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" \ + "localhost/owner-onboarding-server:latest" + +greenprint "🔧 Starting fdo rendezvous server" +sudo podman run -d \ + --ip "$FDO_RENDEZVOUS_ADDRESS" \ + --name rendezvous-server \ + --network edge \ + -v "$PWD"/test/fdo/:/etc/fdo/:z \ + -p 8082:8082 \ + -e POSTGRES_RENDEZVOUS_DATABASE_URL="postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" \ + "localhost/rendezvous-server:latest" + +# Wait for fdo containers to be up and running +until [ "$(curl -X POST http://${FDO_MANUFACTURING_ADDRESS}:8080/ping)" == "pong" ]; do + sleep 1; +done; + +until [ "$(curl -X POST http://${FDO_OWNER_ONBOARDING_ADDRESS}:8081/ping)" == "pong" ]; do + sleep 1; +done; + +until [ "$(curl -X POST http://${FDO_RENDEZVOUS_ADDRESS}:8082/ping)" == "pong" ]; do + sleep 1; +done; + + +greenprint 
"🔧 Check container running status" +sudo podman ps -a + +greenprint "🔧 Collecting container logs" +sudo podman logs postgres manufacture-server owner-onboarding-server rendezvous-server + +greenprint "🔧 Check db tables" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "\dt" | grep "3 rows" + +greenprint "🔧 Generate OV" +sudo podman run \ + --rm \ + --network edge \ + --privileged \ + localhost/clients \ + fdo-manufacturing-client no-plain-di --insecure --manufacturing-server-url "http://${FDO_MANUFACTURING_ADDRESS}:8080" + +greenprint "🔧 Check manufacturing server db for new OV" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "SELECT * FROM manufacturer_vouchers ;" | grep "1 row" + +greenprint "🔧 Check container running status" +sudo podman ps -a + +greenprint "🔧 Export OV" +mkdir export-ov +sudo podman run \ + --rm \ + --network edge \ + --privileged \ + -v "$PWD"/export-ov:/export-ov:z \ + localhost/clients \ + fdo-owner-tool export-manufacturer-vouchers postgres "postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" /export-ov/ | grep "exported" +EXPORTED_FILE=$(ls -1 export-ov) +greenprint "🔧 Import OV into owner db" +sudo podman run \ + --rm \ + --network edge \ + --privileged \ + -v "$PWD"/export-ov:/export-ov:z \ + localhost/clients \ + fdo-owner-tool import-ownership-vouchers postgres "postgresql://${POSTGRES_USERNAME}:${POSTGRES_PASSWORD}@${POSTGRES_IP}/${POSTGRES_DB}" "/export-ov/${EXPORTED_FILE}" | grep "OV import finished" + +greenprint "🔧 Check owner db for imported OV" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "SELECT * FROM owner_vouchers ;" | grep "1 row" + +greenprint "🔧 Sleep 60 seconds to sync with rendezvous db" +sleep 60 + +greenprint "🔧 Check rendezvous db for synced OV" +sudo podman exec \ + postgres \ + psql \ + --username="${POSTGRES_USERNAME}" \ + -c "SELECT * FROM rendezvous_vouchers ;" | grep 
"1 row" + +greenprint "🔧 Check container running status" +sudo podman ps -a + +greenprint "🔧 Collecting container logs" +sudo podman logs rendezvous-server + +rm -rf initdb export-ov +exit 0 diff --git a/test/files/clients b/test/files/clients new file mode 100644 index 000000000..47567046b --- /dev/null +++ b/test/files/clients @@ -0,0 +1,5 @@ +FROM quay.io/centos/centos:stream9 +ARG BUILDID +COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-manufacturing-client /usr/local/bin +COPY --from=fdo-build:${BUILDID} /usr/src/target/release/fdo-owner-tool /usr/local/bin +RUN yum install -y postgresql libpq libpq-devel From 442ac2ceb095710d55cf8ce13e9a2db23e816614 Mon Sep 17 00:00:00 2001 From: Irene Diez Date: Tue, 28 Nov 2023 16:55:17 +0100 Subject: [PATCH 23/25] chore: update spec to add sql files and unify migrations Since the migrations are unified in a single folder the tests have been updated accordingly. Signed-off-by: Irene Diez --- .github/workflows/ci.yml | 6 +++--- fido-device-onboard.spec | 13 +++++++++++++ .../2023-10-03-152801_create_db/down.sql | 0 .../2023-10-03-152801_create_db/up.sql | 0 .../2023-10-03-152801_create_db/down.sql | 0 .../2023-10-03-152801_create_db/up.sql | 0 .../2023-10-03-152801_create_db/down.sql | 0 .../2023-10-03-152801_create_db/up.sql | 0 .../2023-10-03-152801_create_db/down.sql | 0 .../2023-10-03-152801_create_db/up.sql | 0 .../2023-10-03-152801_create_db/down.sql | 0 .../2023-10-03-152801_create_db/up.sql | 0 .../2023-10-03-152801_create_db/down.sql | 0 .../2023-10-03-152801_create_db/up.sql | 0 test/fdo-postgres.sh | 6 +++--- 15 files changed, 19 insertions(+), 6 deletions(-) rename {migrations_manufacturing_server_postgres => migrations/migrations_manufacturing_server_postgres}/2023-10-03-152801_create_db/down.sql (100%) rename {migrations_manufacturing_server_postgres => migrations/migrations_manufacturing_server_postgres}/2023-10-03-152801_create_db/up.sql (100%) rename {migrations_manufacturing_server_sqlite => 
migrations/migrations_manufacturing_server_sqlite}/2023-10-03-152801_create_db/down.sql (100%) rename {migrations_manufacturing_server_sqlite => migrations/migrations_manufacturing_server_sqlite}/2023-10-03-152801_create_db/up.sql (100%) rename {migrations_owner_onboarding_server_postgres => migrations/migrations_owner_onboarding_server_postgres}/2023-10-03-152801_create_db/down.sql (100%) rename {migrations_owner_onboarding_server_postgres => migrations/migrations_owner_onboarding_server_postgres}/2023-10-03-152801_create_db/up.sql (100%) rename {migrations_owner_onboarding_server_sqlite => migrations/migrations_owner_onboarding_server_sqlite}/2023-10-03-152801_create_db/down.sql (100%) rename {migrations_owner_onboarding_server_sqlite => migrations/migrations_owner_onboarding_server_sqlite}/2023-10-03-152801_create_db/up.sql (100%) rename {migrations_rendezvous_server_postgres => migrations/migrations_rendezvous_server_postgres}/2023-10-03-152801_create_db/down.sql (100%) rename {migrations_rendezvous_server_postgres => migrations/migrations_rendezvous_server_postgres}/2023-10-03-152801_create_db/up.sql (100%) rename {migrations_rendezvous_server_sqlite => migrations/migrations_rendezvous_server_sqlite}/2023-10-03-152801_create_db/down.sql (100%) rename {migrations_rendezvous_server_sqlite => migrations/migrations_rendezvous_server_sqlite}/2023-10-03-152801_create_db/up.sql (100%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cc13b0bb0..db41724fa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -118,9 +118,9 @@ jobs: run: | # prep for database tests cargo install --force diesel_cli --no-default-features --features sqlite - diesel migration run --migration-dir ./migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite - diesel migration run --migration-dir ./migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite - diesel migration run --migration-dir 
./migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite + diesel migration run --migration-dir ./migrations/migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite + diesel migration run --migration-dir ./migrations/migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite + diesel migration run --migration-dir ./migrations/migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite # run tests cargo test --workspace # delete sqlite databases diff --git a/fido-device-onboard.spec b/fido-device-onboard.spec index 390b89475..0d6d25c90 100644 --- a/fido-device-onboard.spec +++ b/fido-device-onboard.spec @@ -61,6 +61,13 @@ install -D -m 0755 -t %{buildroot}%{_bindir} target/release/fdo-owner-tool install -D -m 0755 -t %{buildroot}%{_bindir} target/release/fdo-admin-tool install -D -m 0644 -t %{buildroot}%{_unitdir} examples/systemd/* install -D -m 0644 -t %{buildroot}%{_docdir}/fdo examples/config/* +# db sql files +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_manufacturing_server_postgres migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_manufacturing_server_sqlite migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_postgres migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_sqlite migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/* +install -D -m 0644 -t %{buildroot}%{_docdir}/fdo/migrations/migrations_rendezvous_server_postgres migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/* +install -D -m 0644 -t 
%{buildroot}%{_docdir}/fdo/migrations/migrations_rendezvous_server_sqlite migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/* # duplicates as needed by AIO command so link them ln -s %{_bindir}/fdo-owner-tool %{buildroot}%{_libexecdir}/fdo/fdo-owner-tool ln -s %{_bindir}/fdo-admin-tool %{buildroot}%{_libexecdir}/fdo/fdo-admin-tool @@ -123,6 +130,8 @@ Requires: openssl-libs >= 3.0.1-12 %{_docdir}/fdo/device_specific_serviceinfo.yml %{_docdir}/fdo/serviceinfo-api-server.yml %{_docdir}/fdo/owner-onboarding-server.yml +%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_postgres/* +%{_docdir}/fdo/migrations/migrations_owner_onboarding_server_sqlite/* %{_unitdir}/fdo-serviceinfo-api-server.service %{_unitdir}/fdo-owner-onboarding-server.service @@ -156,6 +165,8 @@ License: %combined_license %dir %{_localstatedir}/lib/fdo %dir %{_docdir}/fdo %{_docdir}/fdo/rendezvous-*.yml +%{_docdir}/fdo/migrations/migrations_rendezvous_server_postgres/* +%{_docdir}/fdo/migrations/migrations_rendezvous_server_sqlite/* %{_unitdir}/fdo-rendezvous-server.service %post -n fdo-rendezvous-server @@ -188,6 +199,8 @@ Requires: openssl-libs >= 3.0.1-12 %dir %{_localstatedir}/lib/fdo %dir %{_docdir}/fdo %{_docdir}/fdo/manufacturing-server.yml +%{_docdir}/fdo/migrations/migrations_manufacturing_server_postgres/* +%{_docdir}/fdo/migrations/migrations_manufacturing_server_sqlite/* %{_unitdir}/fdo-manufacturing-server.service %post -n fdo-manufacturing-server diff --git a/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql similarity index 100% rename from migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql rename to migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/down.sql diff --git a/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql 
b/migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql similarity index 100% rename from migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql rename to migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql diff --git a/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql similarity index 100% rename from migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql rename to migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/down.sql diff --git a/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql similarity index 100% rename from migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql rename to migrations/migrations_manufacturing_server_sqlite/2023-10-03-152801_create_db/up.sql diff --git a/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql similarity index 100% rename from migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql rename to migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/down.sql diff --git a/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql similarity index 100% rename from migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql rename to migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql diff --git a/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql 
b/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql similarity index 100% rename from migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql rename to migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/down.sql diff --git a/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql similarity index 100% rename from migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql rename to migrations/migrations_owner_onboarding_server_sqlite/2023-10-03-152801_create_db/up.sql diff --git a/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql b/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql similarity index 100% rename from migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql rename to migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/down.sql diff --git a/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql b/migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql similarity index 100% rename from migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql rename to migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql diff --git a/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql b/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql similarity index 100% rename from migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql rename to migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/down.sql diff --git a/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql b/migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql similarity 
index 100% rename from migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql rename to migrations/migrations_rendezvous_server_sqlite/2023-10-03-152801_create_db/up.sql diff --git a/test/fdo-postgres.sh b/test/fdo-postgres.sh index 2a2950bed..28049ff78 100755 --- a/test/fdo-postgres.sh +++ b/test/fdo-postgres.sh @@ -62,9 +62,9 @@ yq -yi '.storage_driver += {"Postgres": "Rendezvous"}' test/fdo/rendezvous-serve # Prepare postgres db init sql script greenprint "🔧 Prepare postgres db init sql script" mkdir -p initdb -cp migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql initdb/manufacturing.sql -cp migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql initdb/owner-onboarding.sql -cp migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql initdb/rendezvous.sql +cp migrations/migrations_manufacturing_server_postgres/2023-10-03-152801_create_db/up.sql initdb/manufacturing.sql +cp migrations/migrations_owner_onboarding_server_postgres/2023-10-03-152801_create_db/up.sql initdb/owner-onboarding.sql +cp migrations/migrations_rendezvous_server_postgres/2023-10-03-152801_create_db/up.sql initdb/rendezvous.sql greenprint "🔧 Starting postgres" sudo podman run -d \ From 8b6556720f5656cf023f25ab707bf4459b42249d Mon Sep 17 00:00:00 2001 From: djach7 Date: Wed, 3 Jan 2024 14:03:16 -0500 Subject: [PATCH 24/25] fix: adds necessary crates and scripts to dev container Adds sqlite and libpq crates to devcontainer creation for successful building and testing in dev container. Also adds diesel scripts to dev container build to enable successful dev container integration testing. 
Signed-off-by: djach7 --- .devcontainer/Dockerfile | 6 +++++- .devcontainer/devcontainer.json | 12 ++++++++++++ .github/workflows/ci.yml | 10 +++++++++- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 05379902d..090b44914 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,9 +1,13 @@ FROM fedora:latest +ENV PATH "$PATH:/home/vscode/.cargo/bin" + RUN bash -c "$(curl -fsSL "https://raw.githubusercontent.com/microsoft/vscode-dev-containers/main/script-library/common-redhat.sh")" -- "true" "vscode" "1000" "1000" "true" RUN dnf install -y \ - sudo git cargo rust rust-src git-core openssl openssl-devel clippy rustfmt golang tpm2-tss-devel clevis clevis-luks cryptsetup cryptsetup-devel clang-devel \ + sudo git cargo rust rust-src git-core openssl openssl-devel clippy rustfmt golang tpm2-tss-devel clevis clevis-luks cryptsetup cryptsetup-devel clang-devel sqlite sqlite-devel libpq libpq-devel \ && dnf clean all USER vscode + +RUN cargo install --force diesel_cli --no-default-features --features sqlite \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index a972b346c..713753531 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -27,6 +27,17 @@ "serayuzgur.crates", "rust-lang.rust-analyzer" ], + "remoteEnv": { + "PATH": "${containerEnv:PATH}:/home/vscode/.cargo/bin", + "SQLITE_MANUFACTURER_DATABASE_URL": "../ci-manufacturer-db.sqlite", + "SQLITE_OWNER_DATABASE_URL": "../ci-owner-db.sqlite", + "SQLITE_RENDEZVOUS_DATABASE_URL": "../ci-rendezvous-db.sqlite" + }, + "containerEnv": { + "SQLITE_MANUFACTURER_DATABASE_URL": "../ci-manufacturer-db.sqlite", + "SQLITE_OWNER_DATABASE_URL": "../ci-owner-db.sqlite", + "SQLITE_RENDEZVOUS_DATABASE_URL": "../ci-rendezvous-db.sqlite" + }, "hostRequirements": { "memory": "4gb" }, @@ -35,5 +46,6 @@ "cargo", "build" ], + "postCreateCommand": "cargo install 
--force diesel_cli --no-default-features --features sqlite && diesel migration run --migration-dir ./migrations/migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite && diesel migration run --migration-dir ./migrations/migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite && diesel migration run --migration-dir ./migrations/migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite", "waitFor": "onCreateCommand" } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index db41724fa..265104c38 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -190,4 +190,12 @@ jobs: - name: Test building in devcontainer run: docker run --rm -v `pwd`:/code:z --workdir /code --user root devcontainer-fdo-rs cargo build --workspace --verbose - name: Test testing in devcontainer - run: docker run --rm -v `pwd`:/code:z --workdir /code --user root devcontainer-fdo-rs cargo test --lib --bins --workspace --verbose + run: | + docker run -d -v `pwd`:/code:z --workdir /code --user root -e SQLITE_MANUFACTURER_DATABASE_URL='../ci-manufacturer-db.sqlite' -e SQLITE_OWNER_DATABASE_URL='../ci-owner-db.sqlite' -e SQLITE_RENDEZVOUS_DATABASE_URL='../ci-rendezvous-db.sqlite' --name tests devcontainer-fdo-rs sleep infinity + docker exec --user root tests cargo build --lib --bins --workspace --verbose + docker exec --user root tests diesel migration run --migration-dir ./migrations/migrations_manufacturing_server_sqlite --database-url ./ci-manufacturer-db.sqlite + docker exec --user root tests diesel migration run --migration-dir ./migrations/migrations_owner_onboarding_server_sqlite --database-url ./ci-owner-db.sqlite + docker exec --user root tests diesel migration run --migration-dir ./migrations/migrations_rendezvous_server_sqlite --database-url ./ci-rendezvous-db.sqlite + docker exec --user root tests cargo test + docker stop tests + docker rm tests From 530fef045e6befe5d0a88e686c94de06b213fd99 Mon 
Sep 17 00:00:00 2001 From: Irene Diez Date: Thu, 25 Jan 2024 17:34:37 +0100 Subject: [PATCH 25/25] chore: bump db to 0.4.13 Signed-off-by: Irene Diez --- Cargo.lock | 6 +++--- db/Cargo.toml | 6 +++--- owner-tool/Cargo.toml | 2 +- store/Cargo.toml | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c7c7583f..988f5dab2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -691,7 +691,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.18", + "syn 2.0.48", ] [[package]] @@ -700,7 +700,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.18", + "syn 2.0.48", ] [[package]] @@ -911,7 +911,7 @@ dependencies = [ [[package]] name = "fdo-db" -version = "0.4.12" +version = "0.4.13" dependencies = [ "anyhow", "diesel", diff --git a/db/Cargo.toml b/db/Cargo.toml index 44217d305..cfc3319ab 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fdo-db" -version = "0.4.12" +version = "0.4.13" edition = "2021" @@ -8,10 +8,10 @@ edition = "2021" anyhow = "1.0" diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"] } -fdo-data-formats = { path = "../data-formats", version = "0.4.12" } +fdo-data-formats = { path = "../data-formats", version = "0.4.13" } [dev-dependencies] -fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.12", features = ["server"] } +fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.13", features = ["server"] } openssl = "0.10.55" [features] diff --git a/owner-tool/Cargo.toml b/owner-tool/Cargo.toml index bef1b0579..9654b062c 100644 --- a/owner-tool/Cargo.toml +++ b/owner-tool/Cargo.toml @@ -19,6 +19,6 @@ tss-esapi = { version = "7.4", features = ["generate-bindings"] } fdo-util = { path = "../util", version = "0.4.13" } fdo-data-formats = { path = "../data-formats", version = "0.4.13" } 
fdo-http-wrapper = { path = "../http-wrapper", version = "0.4.13", features = ["client"] } -fdo-db = { path = "../db", version = "0.4.12"} +fdo-db = { path = "../db", version = "0.4.13"} hex = "0.4" diff --git a/store/Cargo.toml b/store/Cargo.toml index cb838d327..1d0fce1de 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -23,7 +23,7 @@ xattr = { version = "1.0", default-features = false, optional = true } # We *ne serde_cbor = { version = "0.11", optional = true } # database -fdo-db = { path = "../db", version = "0.4.12"} +fdo-db = { path = "../db", version = "0.4.13"} diesel = { version = "2.1.0", features = ["sqlite", "postgres", "r2d2"], optional = true }