From ee3fa440088b0298c7df1a84fec1811a8448b424 Mon Sep 17 00:00:00 2001 From: David Crespo Date: Wed, 1 Sep 2021 10:21:05 -0500 Subject: [PATCH] Database queries with Diesel (#192) --- Cargo.lock | 94 +- diesel.toml | 5 + omicron-common/Cargo.toml | 2 + omicron-common/src/api/external/error.rs | 42 + omicron-common/src/api/external/mod.rs | 175 ++- omicron-common/src/config.rs | 6 + omicron-common/src/db.rs | 2 +- omicron-common/src/lib.rs | 2 + omicron-common/src/sql/dbinit.sql | 12 +- omicron-nexus/Cargo.toml | 5 +- omicron-nexus/src/db/README.adoc | 204 ++++ omicron-nexus/src/db/datastore.rs | 1019 +++++++++-------- omicron-nexus/src/db/diesel_schema.rs | 172 +++ omicron-nexus/src/db/mod.rs | 12 +- omicron-nexus/src/db/model.rs | 790 +++++++------ omicron-nexus/src/db/operations.rs | 125 -- omicron-nexus/src/db/pagination.rs | 59 + omicron-nexus/src/db/pool.rs | 56 +- omicron-nexus/src/db/saga_recovery.rs | 95 +- omicron-nexus/src/db/saga_types.rs | 304 +++-- omicron-nexus/src/db/schema.rs | 457 -------- omicron-nexus/src/db/sec_store.rs | 12 +- omicron-nexus/src/db/sql.rs | 679 ----------- omicron-nexus/src/db/sql_operations.rs | 830 -------------- omicron-nexus/src/db/update_and_check.rs | 192 ++++ .../src/http_entrypoints_external.rs | 45 +- omicron-nexus/src/lib.rs | 2 + omicron-nexus/src/nexus.rs | 159 ++- omicron-nexus/src/sagas.rs | 2 +- omicron-nexus/tests/test_projects.rs | 61 + 30 files changed, 2309 insertions(+), 3311 deletions(-) create mode 100644 diesel.toml create mode 100644 omicron-nexus/src/db/README.adoc create mode 100644 omicron-nexus/src/db/diesel_schema.rs delete mode 100644 omicron-nexus/src/db/operations.rs create mode 100644 omicron-nexus/src/db/pagination.rs delete mode 100644 omicron-nexus/src/db/schema.rs delete mode 100644 omicron-nexus/src/db/sql.rs delete mode 100644 omicron-nexus/src/db/sql_operations.rs create mode 100644 omicron-nexus/src/db/update_and_check.rs create mode 100644 omicron-nexus/tests/test_projects.rs diff --git a/Cargo.lock b/Cargo.lock index 439e8884ac..aa39ae9320 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,6 +35,17 @@ dependencies = [ "syn", ] +[[package]] +name = "async-bb8-diesel" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/async-bb8-diesel?rev=0a6d535#0a6d535f8ac21b407879e6d7dc5214186a187e08" +dependencies = [ + "async-trait", + "bb8", + "diesel", + "tokio", +] + [[package]] name = "async-trait" version = "0.1.51" @@ -96,18 +107,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "bb8-postgres" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fdf56d52b2cca401d2380407e5c35d3d25d3560224ecf74d6e4ca13e51239b" -dependencies = [ - "async-trait", - "bb8", - "tokio", - "tokio-postgres", -] - [[package]] name = "bhyve_api" version = "0.1.0" @@ -346,6 +345,34 @@ dependencies = [ "syn", ] +[[package]] +name = "diesel" +version = "2.0.0" +source = "git+https://github.com/diesel-rs/diesel?rev=a39dd2e#a39dd2ebc5acccb8cde73d40b0d81dde3e073170" +dependencies = [ + "bitflags", + "byteorder", + "chrono", + "diesel_derives", + "ipnetwork", + "itoa", + "libc", + "pq-sys", + "r2d2", + "serde_json", + "uuid", +] + +[[package]] +name = "diesel_derives" +version = "2.0.0" +source = "git+https://github.com/diesel-rs/diesel?rev=a39dd2e#a39dd2ebc5acccb8cde73d40b0d81dde3e073170" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "difference" version = "2.0.0" @@ -921,6 +948,15 @@ dependencies = [ "serde", ] +[[package]] +name = "ipnetwork" +version = 
"0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4088d739b183546b239688ddbc79891831df421773df95e236daf7867866d355" +dependencies = [ + "serde", +] + [[package]] name = "itertools" version = "0.10.1" @@ -1217,6 +1253,7 @@ dependencies = [ "async-trait", "backoff", "chrono", + "diesel", "dropshot", "expectorate", "futures", @@ -1257,16 +1294,18 @@ name = "omicron-nexus" version = "0.1.0" dependencies = [ "anyhow", + "async-bb8-diesel", "async-trait", "bb8", - "bb8-postgres", "bytes", "chrono", + "diesel", "dropshot", "expectorate", "futures", "http", "hyper", + "ipnetwork", "lazy_static", "libc", "newtype_derive", @@ -1613,6 +1652,15 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +[[package]] +name = "pq-sys" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac25eee5a0582f45a67e837e350d784e7003bd29a5f460796772061ca49ffda" +dependencies = [ + "vcpkg", +] + [[package]] name = "predicates" version = "1.0.8" @@ -1776,6 +1824,17 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r2d2" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + [[package]] name = "rand" version = "0.8.3" @@ -1993,6 +2052,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "scheduled-thread-pool" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +dependencies = [ + "parking_lot", +] + [[package]] name = "schemars" version = "0.8.3" diff --git a/diesel.toml b/diesel.toml new file mode 100644 index 0000000000..3392a5a73a --- /dev/null +++ b/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "omicron-nexus/src/db/diesel_schema.rs" diff --git a/omicron-common/Cargo.toml b/omicron-common/Cargo.toml index f650a47d9a..3cfba5cd61 100644 --- a/omicron-common/Cargo.toml +++ b/omicron-common/Cargo.toml @@ -6,6 +6,8 @@ edition = "2018" [dependencies] anyhow = "1.0" async-trait = "0.1.51" +# Tracking pending 2.0 version. +diesel = { git = "https://github.com/diesel-rs/diesel", rev = "a39dd2e", features = ["postgres", "r2d2", "chrono", "uuid"] } futures = "0.3.15" http = "0.2.0" hyper = "0.14" diff --git a/omicron-common/src/api/external/error.rs b/omicron-common/src/api/external/error.rs index ae4e7b99b8..7d8ce3a374 100644 --- a/omicron-common/src/api/external/error.rs +++ b/omicron-common/src/api/external/error.rs @@ -6,6 +6,8 @@ use crate::api::external::Name; use crate::api::external::ResourceType; +use diesel::result::DatabaseErrorKind as DieselErrorKind; +use diesel::result::Error as DieselError; use dropshot::HttpError; use dropshot::HttpErrorResponseBody; use serde::Deserialize; @@ -161,6 +163,46 @@ impl Error { }, } } + + /// Converts a Diesel error to an external type error. 
+ pub fn from_diesel( + error: DieselError, + resource_type: ResourceType, + lookup_type: LookupType, + ) -> Error { + match error { + DieselError::NotFound => { + Error::ObjectNotFound { type_name: resource_type, lookup_type } + } + DieselError::DatabaseError(kind, _info) => { + Error::unavail(format!("Database error: {:?}", kind).as_str()) + } + _ => Error::internal_error("Unknown diesel error"), + } + } + + /// Converts a Diesel error to an external type error, when requested as + /// part of a creation operation. + pub fn from_diesel_create( + error: DieselError, + resource_type: ResourceType, + object_name: &str, + ) -> Error { + match error { + DieselError::DatabaseError(kind, _info) => match kind { + DieselErrorKind::UniqueViolation => { + Error::ObjectAlreadyExists { + type_name: resource_type, + object_name: object_name.to_string(), + } + } + _ => Error::unavail( + format!("Database error: {:?}", kind).as_str(), + ), + }, + _ => Error::internal_error("Unknown diesel error"), + } + } } impl From for HttpError { diff --git a/omicron-common/src/api/external/mod.rs b/omicron-common/src/api/external/mod.rs index 246edb109a..bd9780816d 100644 --- a/omicron-common/src/api/external/mod.rs +++ b/omicron-common/src/api/external/mod.rs @@ -14,6 +14,10 @@ use anyhow::Context; use api_identity::ObjectIdentity; use chrono::DateTime; use chrono::Utc; +use diesel::backend::{Backend, RawValue}; +use diesel::deserialize::{self, FromSql}; +use diesel::serialize::{self, ToSql}; +use diesel::sql_types; pub use dropshot::PaginationOrder; use futures::future::ready; use futures::stream::BoxStream; @@ -43,6 +47,8 @@ pub type CreateResult = Result; pub type DeleteResult = Result<(), Error>; /** Result of a list operation that returns an ObjectStream */ pub type ListResult = Result, Error>; +/** Result of a list operation that returns a vector */ +pub type ListResultVec = Result, Error>; /** Result of a lookup operation for the specified type */ pub type LookupResult = Result; /** Result of an update operation for the specified type */ @@ -111,8 +117,39 @@ pub struct DataPageParams<'a, NameType> { Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize, )] #[serde(try_from = "String")] +// Diesel-specific derives: +// +// - Types which implement "ToSql" should implement "AsExpression". +// - Types which implement "FromSql" should implement "FromSqlRow". +#[derive(AsExpression, FromSqlRow)] +#[sql_type = "sql_types::Text"] pub struct Name(String); +// Serialize the "Name" object to SQL as TEXT. +impl ToSql for Name +where + DB: Backend, + String: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + (&self.0 as &String).to_sql(out) + } +} + +// Deserialize the "Name" object from SQL TEXT. +impl FromSql for Name +where + DB: Backend, + String: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + Name::try_from(String::from_sql(bytes)?).map_err(|e| e.into()) + } +} + /** * `Name::try_from(String)` is the primary method for constructing an Name * from an input string. This validates the string according to our @@ -278,8 +315,42 @@ impl Name { * the database as an i64. Constraining it here ensures that we can't fail to * serialize the value. 
*/ -#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[derive( + Copy, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + AsExpression, + FromSqlRow, +)] +#[sql_type = "sql_types::BigInt"] pub struct ByteCount(u64); + +impl ToSql for ByteCount +where + DB: Backend, + i64: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + i64::from(self).to_sql(out) + } +} + +impl FromSql for ByteCount +where + DB: Backend, + i64: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + ByteCount::try_from(i64::from_sql(bytes)?).map_err(|e| e.into()) + } +} + impl ByteCount { pub fn from_kibibytes_u32(kibibytes: u32) -> ByteCount { ByteCount::try_from(1024 * u64::from(kibibytes)).unwrap() @@ -372,8 +443,35 @@ impl From<&ByteCount> for i64 { PartialEq, PartialOrd, Serialize, + AsExpression, + FromSqlRow, )] +#[sql_type = "sql_types::BigInt"] pub struct Generation(u64); + +impl ToSql for Generation +where + DB: Backend, + i64: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + (self.0 as i64).to_sql(out) + } +} + +impl FromSql for Generation +where + DB: Backend, + i64: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + Generation::try_from(i64::from_sql(bytes)?).map_err(|e| e.into()) + } +} + impl Generation { pub fn new() -> Generation { Generation(1) @@ -436,6 +534,7 @@ pub enum ResourceType { Sled, SagaDbg, Vpc, + Oximeter, } impl Display for ResourceType { @@ -452,6 +551,7 @@ impl Display for ResourceType { ResourceType::Sled => "sled", ResourceType::SagaDbg => "saga_dbg", ResourceType::Vpc => "vpc", + ResourceType::Oximeter => "oximeter", } ) } @@ -660,9 +760,42 @@ impl InstanceState { } /** The number of CPUs in an Instance */ -#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)] +#[derive( + Copy, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + AsExpression, + FromSqlRow, +)] +#[sql_type = "sql_types::BigInt"] pub struct InstanceCpuCount(pub u16); +impl ToSql for InstanceCpuCount +where + DB: Backend, + i64: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + (self.0 as i64).to_sql(out) + } +} + +impl FromSql for InstanceCpuCount +where + DB: Backend, + i64: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + InstanceCpuCount::try_from(i64::from_sql(bytes)?).map_err(|e| e.into()) + } +} + impl TryFrom for InstanceCpuCount { type Error = anyhow::Error; @@ -1207,11 +1340,43 @@ pub struct VpcSubnet { /// The `MacAddr` represents a Media Access Control (MAC) address, used to uniquely identify /// hardware devices on a network. // NOTE: We're using the `macaddr` crate for the internal representation. But as with the `ipnet`, -// this crate does not implement `JsonSchema`, nor the the SQL conversion traits `FromSql` and -// `ToSql`. -#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)] +// this crate does not implement `JsonSchema`. 
+#[derive( + Clone, + Copy, + Debug, + Deserialize, + PartialEq, + Serialize, + AsExpression, + FromSqlRow, +)] +#[sql_type = "sql_types::Text"] pub struct MacAddr(pub macaddr::MacAddr6); +impl ToSql for MacAddr +where + DB: Backend, + String: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + self.0.to_string().to_sql(out) + } +} + +impl FromSql for MacAddr +where + DB: Backend, + String: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + MacAddr::try_from(String::from_sql(bytes)?).map_err(|e| e.into()) + } +} + impl std::ops::Deref for MacAddr { type Target = macaddr::MacAddr6; fn deref(&self) -> &Self::Target { diff --git a/omicron-common/src/config.rs b/omicron-common/src/config.rs index 8ae143d75f..ef8ef7a89f 100644 --- a/omicron-common/src/config.rs +++ b/omicron-common/src/config.rs @@ -30,6 +30,12 @@ pub struct PostgresConfigWithUrl { config: tokio_postgres::config::Config, } +impl PostgresConfigWithUrl { + pub fn url(&self) -> String { + self.url_raw.clone() + } +} + impl FromStr for PostgresConfigWithUrl { type Err = tokio_postgres::Error; diff --git a/omicron-common/src/db.rs b/omicron-common/src/db.rs index c5ddc13946..d13c1176ee 100644 --- a/omicron-common/src/db.rs +++ b/omicron-common/src/db.rs @@ -57,7 +57,7 @@ impl DbError { } } -/** Given an arbitrary [`DbError`], produce an [`Error`] for the problem. */ +/** Given an arbitrary [`DbError`], produce an [`enum@Error`] for the problem. */ /* * This could potentially be an `impl From for Error`. However, * there are multiple different ways to do this transformation, depending on diff --git a/omicron-common/src/lib.rs b/omicron-common/src/lib.rs index 69aaf15cb7..edba24e66a 100644 --- a/omicron-common/src/lib.rs +++ b/omicron-common/src/lib.rs @@ -41,3 +41,5 @@ pub use oximeter_client::Client as OximeterClient; #[macro_use] extern crate slog; +#[macro_use] +extern crate diesel; diff --git a/omicron-common/src/sql/dbinit.sql b/omicron-common/src/sql/dbinit.sql index 542bc73047..6b7890e860 100644 --- a/omicron-common/src/sql/dbinit.sql +++ b/omicron-common/src/sql/dbinit.sql @@ -108,8 +108,8 @@ CREATE TABLE omicron.public.Instance ( * table? */ /* Runtime state */ - -- instance_state omicron.public.InstanceState NOT NULL, // TODO see above - instance_state TEXT NOT NULL, + -- state omicron.public.InstanceState NOT NULL, // TODO see above + state TEXT NOT NULL, time_state_updated TIMESTAMPTZ NOT NULL, state_generation INT NOT NULL, /* @@ -172,13 +172,13 @@ CREATE TABLE omicron.public.Disk ( /* Runtime state */ -- disk_state omicron.public.DiskState NOT NULL, /* TODO see above */ disk_state STRING(15) NOT NULL, - time_state_updated TIMESTAMPTZ NOT NULL, - state_generation INT NOT NULL, /* * Every Disk may be attaching to, attached to, or detaching from at most * one Instance at a time. */ attach_instance_id UUID, + state_generation INT NOT NULL, + time_state_updated TIMESTAMPTZ NOT NULL, /* Disk configuration */ size_bytes INT NOT NULL, @@ -232,7 +232,7 @@ CREATE TABLE omicron.public.MetricProducer ( port INT4 NOT NULL, interval FLOAT NOT NULL, /* TODO: Is this length appropriate? */ - route STRING(512) + base_route STRING(512) ); /* @@ -361,7 +361,7 @@ CREATE TABLE omicron.public.Saga ( * - number of adoptions? 
 */
     saga_state STRING(31) NOT NULL, /* see SagaState above */
-    current_sec UUID NOT NULL,
+    current_sec UUID,
     adopt_generation INT NOT NULL,
     adopt_time TIMESTAMPTZ NOT NULL
 );
diff --git a/omicron-nexus/Cargo.toml b/omicron-nexus/Cargo.toml
index f7af1aa962..bbfaa61360 100644
--- a/omicron-nexus/Cargo.toml
+++ b/omicron-nexus/Cargo.toml
@@ -7,10 +7,13 @@ edition = "2018"
 anyhow = "1.0"
 async-trait = "0.1.51"
 bb8 = "0.7.1"
-bb8-postgres = "0.7.0"
+async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "0a6d535" }
+# Tracking pending 2.0 version.
+diesel = { git = "https://github.com/diesel-rs/diesel", rev = "a39dd2e", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] }
 futures = "0.3.15"
 http = "0.2.0"
 hyper = "0.14"
+ipnetwork = "0.18"
 lazy_static = "1.4.0"
 libc = "0.2.98"
 newtype_derive = "0.1.6"
diff --git a/omicron-nexus/src/db/README.adoc b/omicron-nexus/src/db/README.adoc
new file mode 100644
index 0000000000..309851c9c0
--- /dev/null
+++ b/omicron-nexus/src/db/README.adoc
@@ -0,0 +1,204 @@
+:showtitle:
+:toc: left
+:icons: font
+
+= Nexus Database
+
+This directory contains functionality relating to Nexus' database
+implementation. This implementation makes use of https://diesel.rs/[Diesel], a
+Rust query builder.
+
+NOTE: Diesel's last released stable branch is 1.4.x, but there is ongoing
+development on the not-yet-released 2.0 branch. **We are currently using the
+2.0 branch**, as this branch includes necessary support for using Diesel in an
+async context. Additionally, there are other miscellaneous features (such as
+"Selectable") which are nice-to-have. Furthermore, this gives us an opportunity
+to more easily contribute upstream.
+
+== Why use Diesel?
+
+Diesel provides a strongly-typed, extensible framework for creating queries.
+Instead of parsing SQL as raw strings, this provides additional layers of
+validation through the type system, ensuring:
+
+- Operations are only issued to tables with corresponding columns.
+- Complex queries can be chained together and validated at compile-time.
+- Code generation avoids boilerplate by autogenerating the translation of
+  structs to/from SQL.
+- Escape hatches exist for writing raw SQL and CTEs.
+
+== How do I use Diesel?
+
+Although there are high-quality https://diesel.rs/guides/[guides for Diesel],
+the following is a brief overview of features as they appear within
+Omicron.
+
+=== Tables
+
+Diesel auto-generates types which allow callers to construct SQL expressions.
+These types, however, must map to the underlying database to be useful.
+
+We make use of the https://docs.diesel.rs/master/diesel/macro.table.html[table macro]
+within link:diesel_schema.rs[diesel_schema.rs] to define these tables and their
+corresponding SQL types. These types are then accessible within the `diesel_schema`
+module.
+
+=== Creating Queries
+
+For performing operations on a database - insertions, queries, updates, and
+deletions - Diesel can operate on explicit columns or a structure which maps to
+a group of columns.
+
+Many of these structures - representing the data model - are stored within
+link:model.rs[model.rs], where they also have helper conversions from/into types
+which are used outside the database (such as in the HTTP API).
+
+Diesel provides some derive macros worth knowing about:
+
+https://docs.diesel.rs/master/diesel/prelude/derive.Insertable.html[Insertable] indicates
+that a structure may be inserted into a table.
+[source,rust]
+----
+#[derive(Insertable)]
+#[table_name = "instance"]
+struct Instance {
+    ...
+}
+
+use db::diesel_schema::instance::dsl;
+let insert_query = diesel::insert_into(dsl::instance)
+    .values(Instance::new());
+----
+
+https://docs.diesel.rs/master/diesel/prelude/derive.Queryable.html[Queryable] indicates
+that a structure may be queried from a table.
+[source,rust]
+----
+#[derive(Queryable)]
+struct Instance {
+    ...
+}
+
+use db::diesel_schema::instance::dsl;
+let lookup_query = dsl::instance.filter(dsl::id.eq(id_to_lookup));
+----
+
+https://docs.diesel.rs/master/diesel/prelude/derive.AsChangeset.html[AsChangeset] indicates
+that a structure can be used to update a table.
+[source,rust]
+----
+#[derive(AsChangeset)]
+struct InstanceUpdate {
+    ...
+}
+
+use db::diesel_schema::instance::dsl;
+let update_query = diesel::update(dsl::instance).set(InstanceUpdate::new());
+----
+
+=== Issuing Queries
+
+To issue these queries to the actual database, we make use of
+https://github.com/oxidecomputer/async-bb8-diesel[async-bb8-diesel], which
+provides an async wrapper around a threaded connection pool.
+
+In the prior section, we showed how to construct queries, but never actually
+issued them to the database.
+
+Assuming we have access to a link:pool.rs[database pool object], queries take
+the following form:
+
+[source,rust]
+----
+let pool = ...; // See: pool.rs
+let query = ...; // See: prior section on constructing queries.
+
+query.execute_async(&pool).await
+----
+
+Additional examples of issuing queries can be found within the
+https://github.com/oxidecomputer/async-bb8-diesel/blob/master/examples/usage.rs[examples section]
+of the `async-bb8-diesel` repository.
+
+NOTE: At the time of writing, Diesel supports exclusively
+https://docs.diesel.rs/master/diesel/prelude/trait.RunQueryDsl.html[synchronous queries]
+to the database. The `async-bb8-diesel` crate provides an adapter trait
+called https://github.com/oxidecomputer/async-bb8-diesel/blob/0a6d535f8ac21b407879e6d7dc5214186a187e08/src/lib.rs#L232-L260[AsyncRunQueryDsl] which provides the same functionality, but asynchronously,
+dispatching work to a thread pool. The goal of this trait is to align closely
+with the Diesel API for issuing requests, while being (1) async, and
+(2) executed on a thread pool.
+
+=== Transactions
+
+https://docs.diesel.rs/master/diesel/connection/trait.Connection.html#method.transaction[Transactions in Diesel] are issued as closures. Diesel automatically commits or rolls back the operation,
+depending on the return value of the contained closure.
+
+Using the async adapter, Diesel transactions may be issued as follows:
+
+[source,rust]
+----
+// Note that the transaction is async, but the operations
+// within the transaction are not. This is okay - they'll be
+// moved to a thread where blocking is acceptable before
+// being executed.
+let result = pool.transaction(|conn| {
+    diesel::insert_into(...)
+        .values(...)
+        .execute(conn)?;
+    diesel::update(...)
+        .set(...)
+        .execute(conn)?;
+    Ok(result)
+}).await?;
+----
+
+=== Helper Functions
+
+As with any style of programming, when operations are repeated, it can be
+useful to refactor them. As one example, we make use of link:pagination.rs[pagination]
+while accessing the database.
+
+As many of the Diesel structures are strongly typed - tables, columns, etc. - the
+magic sauce for making helper functions work correctly is *generics*.
This typically +meanings using https://docs.diesel.rs/master/diesel/query_dsl/methods/index.html[trait bounds +indicating which methods should be accessible], and then performing regular Diesel operations. + +=== CTEs + +Diesel support for CTEs is still underway, though the entrypoints for injecting raw SQL +are either: +- https://docs.diesel.rs/master/diesel/fn.sql_query.html[sql_query]: An entrypoint for +a full SQL query, if you want mostly dodge Diesel's query builder, or... +- https://diesel.rs/guides/extending-diesel.html[Extending the DSL], which may involve +a custom implementation of https://docs.diesel.rs/master/diesel/query_builder/trait.QueryFragment.html[QueryFragment]. + +An link:update_and_check.rs[example CTE exists within Omicron], which extends the Diesel +DSL to issue a "select" and "update" query simultaneously, performing a conditional update +that allows callers to distinguish between "object found and updated", "object found and +NOT updated", and "object not found". + +This is just one example of a CTE which extends diesel, but it fits in with the +typical Diesel pattern of "fully construct a query, then execute it". + +[source,rust] +---- +let updated = diesel::update(dsl::instance) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(instance_id)) + .set(new_runtime) + // New query method: blanket implemented as an extension for update queries. + .check_if_exists(instance_id) + // New execution method: only callable after "check_if_exists". + .execute_and_check::(pool) + .await?; +---- + +== Areas for Improvement + +- **Selectable for embedded structures**: Many of our database model structures - +which map to a SQL table - are "flattened" for usage in diesel. We +could potentially avoid this with "Selectable". +- **Native Async Support**: The `async-bb8-diesel` respository provides a mechanism for +offloading Diesel requests to a Tokio-controlled synchronous thread pool, but ideally +we'd use a native Diesel API that never requires blocking threads. This improvement +would require contribution to upstream Diesel. 
diff --git a/omicron-nexus/src/db/datastore.rs b/omicron-nexus/src/db/datastore.rs index fff42a4809..8da4138a8a 100644 --- a/omicron-nexus/src/db/datastore.rs +++ b/omicron-nexus/src/db/datastore.rs @@ -19,49 +19,28 @@ */ use super::Pool; +use async_bb8_diesel::{AsyncRunQueryDsl, DieselConnectionManager}; use chrono::Utc; +use diesel::{ExpressionMethods, QueryDsl}; use omicron_common::api; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::Generation; -use omicron_common::api::external::ListResult; +use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; +use omicron_common::api::external::LookupType; use omicron_common::api::external::Name; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::bail_unless; -use omicron_common::db::sql_row_value; -use std::convert::TryFrom; use std::sync::Arc; -use tokio_postgres::types::ToSql; use uuid::Uuid; -use super::operations::sql_execute_maybe_one; -use super::operations::sql_query_maybe_one; -use super::schema; -use super::schema::Disk; -use super::schema::Instance; -use super::schema::LookupByAttachedInstance; -use super::schema::LookupByUniqueId; -use super::schema::LookupByUniqueName; -use super::schema::LookupByUniqueNameInProject; -use super::schema::Project; -use super::schema::Vpc; -use super::sql::SqlSerialize; -use super::sql::SqlString; -use super::sql::SqlValueSet; -use super::sql::Table; -use super::sql_operations::sql_fetch_page_by; -use super::sql_operations::sql_fetch_page_from_table; -use super::sql_operations::sql_fetch_row_by; -use super::sql_operations::sql_fetch_row_raw; -use super::sql_operations::sql_insert; -use super::sql_operations::sql_insert_unique; -use super::sql_operations::sql_insert_unique_idempotent_and_fetch; -use super::sql_operations::sql_update_precond; use crate::db; +use crate::db::pagination::paginated; +use crate::db::update_and_check::{UpdateAndCheck, UpdateStatus}; pub struct DataStore { pool: Arc, @@ -72,30 +51,51 @@ impl DataStore { DataStore { pool } } + fn pool( + &self, + ) -> &bb8::Pool> { + self.pool.pool() + } + /// Create a project pub async fn project_create( &self, - new_project: &db::model::Project, + project: db::model::Project, ) -> CreateResult { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - new_project.sql_serialize(&mut values); - sql_insert_unique::(&client, &values, &new_project.name()) + use db::diesel_schema::project::dsl; + + let name = project.name().to_string(); + diesel::insert_into(dsl::project) + .values(project) + .get_result_async(self.pool()) .await + .map_err(|e| { + Error::from_diesel_create( + e, + ResourceType::Project, + name.as_str(), + ) + }) } - /// Fetch metadata for a project + /// Lookup a project by name. 
pub async fn project_fetch( &self, - project_name: &Name, + name: &Name, ) -> LookupResult { - let client = self.pool.acquire().await?; - sql_fetch_row_by::( - &client, - (), - project_name, - ) - .await + use db::diesel_schema::project::dsl; + dsl::project + .filter(dsl::time_deleted.is_null()) + .filter(dsl::name.eq(name.clone())) + .first_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Project, + LookupType::ByName(name.as_str().to_owned()), + ) + }) } /// Delete a project @@ -104,23 +104,23 @@ impl DataStore { * depend on the Project (Disks, Instances). We can do this with a * generation counter that gets bumped when these resources are created. */ - pub async fn project_delete(&self, project_name: &Name) -> DeleteResult { - let client = self.pool.acquire().await?; + pub async fn project_delete(&self, name: &Name) -> DeleteResult { + use db::diesel_schema::project::dsl; let now = Utc::now(); - sql_execute_maybe_one( - &client, - format!( - "UPDATE {} SET time_deleted = $1 WHERE \ - time_deleted IS NULL AND name = $2 LIMIT 2 \ - RETURNING {}", - Project::TABLE_NAME, - Project::ALL_COLUMNS.join(", ") - ) - .as_str(), - &[&now, &project_name], - || Error::not_found_by_name(ResourceType::Project, project_name), - ) - .await + diesel::update(dsl::project) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::name.eq(name.clone())) + .set(dsl::time_deleted.eq(now)) + .get_result_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Project, + LookupType::ByName(name.as_str().to_owned()), + ) + })?; + Ok(()) } /// Look up the id for a project based on its name @@ -128,87 +128,86 @@ impl DataStore { &self, name: &Name, ) -> Result { - let client = self.pool.acquire().await?; - let row = sql_fetch_row_raw::( - &client, - (), - name, - &["id"], - ) - .await?; - sql_row_value(&row, "id") - } - - /// List a page of projects by id + use db::diesel_schema::project::dsl; + dsl::project + .filter(dsl::time_deleted.is_null()) + .filter(dsl::name.eq(name.clone())) + .select(dsl::id) + .get_result_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Project, + LookupType::ByName(name.as_str().to_owned()), + ) + }) + } + pub async fn projects_list_by_id( &self, pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResult { - let client = self.pool.acquire().await?; - sql_fetch_page_from_table::( - &client, - (), - pagparams, - ) - .await + ) -> ListResultVec { + use db::diesel_schema::project::dsl; + paginated(dsl::project, dsl::id, pagparams) + .filter(dsl::time_deleted.is_null()) + .load_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Project, + LookupType::Other("Listing All".to_string()), + ) + }) } - /// List a page of projects by name pub async fn projects_list_by_name( &self, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { - let client = self.pool.acquire().await?; - sql_fetch_page_by::< - LookupByUniqueName, - Project, - ::Model, - >(&client, (), pagparams, Project::ALL_COLUMNS) - .await + ) -> ListResultVec { + use db::diesel_schema::project::dsl; + paginated(dsl::project, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .load_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Project, + LookupType::Other("Listing All".to_string()), + ) + }) } /// Updates a project by name (clobbering update -- no etag) pub async fn project_update( &self, - project_name: &Name, + name: &Name, 
update_params: &api::external::ProjectUpdateParams, ) -> UpdateResult { - let client = self.pool.acquire().await?; - let now = Utc::now(); - - let mut sql = - format!("UPDATE {} SET time_modified = $1 ", Project::TABLE_NAME); - let mut params: Vec<&(dyn ToSql + Sync)> = vec![&now]; - - if let Some(new_name) = &update_params.identity.name { - sql.push_str(&format!(", name = ${} ", params.len() + 1)); - params.push(new_name); - } - - if let Some(new_description) = &update_params.identity.description { - sql.push_str(&format!(", description = ${} ", params.len() + 1)); - params.push(new_description); - } - - sql.push_str(&format!( - " WHERE name = ${} AND time_deleted IS NULL LIMIT 2 RETURNING {}", - params.len() + 1, - Project::ALL_COLUMNS.join(", ") - )); - params.push(project_name); - - let row = sql_query_maybe_one(&client, sql.as_str(), ¶ms, || { - Error::not_found_by_name(ResourceType::Project, project_name) - }) - .await?; - Ok(db::model::Project::try_from(&row)?) + use db::diesel_schema::project::dsl; + let updates: db::model::ProjectUpdate = update_params.clone().into(); + + diesel::update(dsl::project) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::name.eq(name.clone())) + .set(updates) + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Project, + LookupType::ByName(name.as_str().to_owned()), + ) + }) } /* * Instances */ - /// /// Idempotently insert a database record for an Instance /// /// This is intended to be used by a saga action. When we say this is @@ -226,7 +225,6 @@ impl DataStore { /// In addition to the usual database errors (e.g., no connections /// available), this function can fail if there is already a different /// instance (having a different id) with the same name in the same project. 
- /// /* * TODO-design Given that this is really oriented towards the saga * interface, one wonders if it's even worth having an abstraction here, or @@ -240,38 +238,38 @@ impl DataStore { params: &api::external::InstanceCreateParams, runtime_initial: &db::model::InstanceRuntimeState, ) -> CreateResult { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); + use db::diesel_schema::instance::dsl; + let instance = db::model::Instance::new( *instance_id, *project_id, params, runtime_initial.clone(), ); - instance.sql_serialize(&mut values); - let instance = sql_insert_unique_idempotent_and_fetch::< - Instance, - LookupByUniqueId, - >( - &client, - &values, - params.identity.name.as_str(), - "id", - (), - instance_id, - ) - .await?; + let name = instance.name.clone(); + let instance: db::model::Instance = diesel::insert_into(dsl::instance) + .values(instance) + .on_conflict(dsl::id) + .do_nothing() + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create( + e, + ResourceType::Instance, + name.as_str(), + ) + })?; bail_unless!( - instance.runtime.run_state.state() - == &api::external::InstanceState::Creating, + instance.state.state() == &api::external::InstanceState::Creating, "newly-created Instance has unexpected state: {:?}", - instance.runtime.run_state + instance.state ); bail_unless!( - instance.runtime.gen == runtime_initial.gen, + instance.state_generation == runtime_initial.gen, "newly-created Instance has unexpected generation: {:?}", - instance.runtime.gen + instance.state_generation ); Ok(instance) } @@ -280,23 +278,41 @@ impl DataStore { &self, project_id: &Uuid, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { - let client = self.pool.acquire().await?; - sql_fetch_page_by::< - LookupByUniqueNameInProject, - Instance, - ::Model, - >(&client, (project_id,), pagparams, Instance::ALL_COLUMNS) - .await + ) -> ListResultVec { + use db::diesel_schema::instance::dsl; + + paginated(dsl::instance, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(*project_id)) + .load_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Instance, + LookupType::Other("Listing All".to_string()), + ) + }) } pub async fn instance_fetch( &self, instance_id: &Uuid, ) -> LookupResult { - let client = self.pool.acquire().await?; - sql_fetch_row_by::(&client, (), instance_id) + use db::diesel_schema::instance::dsl; + + dsl::instance + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*instance_id)) + .get_result_async(self.pool()) .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Instance, + LookupType::ById(*instance_id), + ) + }) } pub async fn instance_fetch_by_name( @@ -304,13 +320,21 @@ impl DataStore { project_id: &Uuid, instance_name: &Name, ) -> LookupResult { - let client = self.pool.acquire().await?; - sql_fetch_row_by::( - &client, - (project_id,), - instance_name, - ) - .await + use db::diesel_schema::instance::dsl; + + dsl::instance + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(*project_id)) + .filter(dsl::name.eq(instance_name.clone())) + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Instance, + LookupType::ByName(instance_name.as_str().to_owned()), + ) + }) } /* @@ -327,28 +351,29 @@ impl DataStore { instance_id: &Uuid, new_runtime: &db::model::InstanceRuntimeState, ) -> Result { - let client = self.pool.acquire().await?; - - let mut values = 
SqlValueSet::new(); - new_runtime.sql_serialize(&mut values); - - let mut cond_sql = SqlString::new(); - let param = cond_sql.next_param(&new_runtime.gen); - cond_sql.push_str(&format!("state_generation < {}", param)); - - let update = sql_update_precond::( - &client, - (), - instance_id, - &["state_generation"], - &values, - cond_sql, - ) - .await?; - let row = &update.found_state; - let found_id: Uuid = sql_row_value(&row, "found_id")?; - bail_unless!(found_id == *instance_id); - Ok(update.updated) + use db::diesel_schema::instance::dsl; + + let updated = diesel::update(dsl::instance) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*instance_id)) + .filter(dsl::state_generation.lt(new_runtime.gen)) + .set(new_runtime.clone()) + .check_if_exists(*instance_id) + .execute_and_check::(self.pool()) + .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Instance, + LookupType::ById(*instance_id), + ) + })?; + + Ok(updated) } pub async fn project_delete_instance( @@ -364,49 +389,31 @@ impl DataStore { * (e.g., disk attachments). If that changes, we'll want to check for * such dependencies here. */ - let client = self.pool.acquire().await?; + use api::external::InstanceState as ApiInstanceState; + use db::diesel_schema::instance::dsl; + use db::model::InstanceState as DbInstanceState; + let now = Utc::now(); - let mut values = SqlValueSet::new(); - db::model::InstanceState::new(api::external::InstanceState::Destroyed) - .sql_serialize(&mut values); - values.set("time_deleted", &now); - - let mut cond_sql = SqlString::new(); - - let stopped = api::external::InstanceState::Stopped.to_string(); - let p1 = cond_sql.next_param(&stopped); - let failed = api::external::InstanceState::Failed.to_string(); - let p2 = cond_sql.next_param(&failed); - cond_sql.push_str(&format!("instance_state in ({}, {})", p1, p2)); - - let update = sql_update_precond::( - &client, - (), - instance_id, - &["instance_state", "time_deleted"], - &values, - cond_sql, - ) - .await?; - - let row = &update.found_state; - let found_id: Uuid = sql_row_value(&row, "found_id")?; - let variant: &str = sql_row_value(&row, "found_instance_state")?; - let instance_state = api::external::InstanceState::try_from(variant) - .map_err(|e| Error::internal_error(&e))?; - bail_unless!(found_id == *instance_id); - - if update.updated { - Ok(()) - } else { - Err(Error::InvalidRequest { - message: format!( - "instance cannot be deleted in state \"{}\"", - instance_state - ), - }) - } + let destroyed = DbInstanceState::new(ApiInstanceState::Destroyed); + let stopped = DbInstanceState::new(ApiInstanceState::Stopped); + let failed = DbInstanceState::new(ApiInstanceState::Failed); + + diesel::update(dsl::instance) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*instance_id)) + .filter(dsl::state.eq_any(vec![stopped, failed])) + .set((dsl::state.eq(destroyed), dsl::time_deleted.eq(now))) + .get_result_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Instance, + LookupType::ById(*instance_id), + ) + })?; + Ok(()) } /* @@ -420,19 +427,28 @@ impl DataStore { &self, instance_id: &Uuid, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { - let client = self.pool.acquire().await?; - sql_fetch_page_by::< - LookupByAttachedInstance, - Disk, - db::model::DiskAttachment, - >( - &client, - (instance_id,), - pagparams, - &["id", "name", "disk_state", 
"attach_instance_id"], - ) - .await + ) -> ListResultVec { + use db::diesel_schema::disk::dsl; + + paginated(dsl::disk, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::attach_instance_id.eq(*instance_id)) + .load_async::(self.pool()) + .await + .map(|disks| { + disks + .into_iter() + // Unwrap safety: filtered by instance_id in query. + .map(|disk| disk.attachment().unwrap()) + .collect() + }) + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Disk, + LookupType::Other("Listing All".to_string()), + ) + }) } pub async fn project_create_disk( @@ -442,40 +458,35 @@ impl DataStore { params: &api::external::DiskCreateParams, runtime_initial: &db::model::DiskRuntimeState, ) -> CreateResult { - /* - * See project_create_instance() for a discussion of how this function - * works. The pattern here is nearly identical. - */ - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); + use db::diesel_schema::disk::dsl; + let disk = db::model::Disk::new( *disk_id, *project_id, params.clone(), runtime_initial.clone(), ); - disk.sql_serialize(&mut values); - let disk = - sql_insert_unique_idempotent_and_fetch::( - &client, - &mut values, - params.identity.name.as_str(), - "id", - (), - disk_id, - ) - .await?; + let name = disk.name.clone(); + let disk: db::model::Disk = diesel::insert_into(dsl::disk) + .values(disk) + .on_conflict(dsl::id) + .do_nothing() + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create(e, ResourceType::Disk, name.as_str()) + })?; + let runtime = disk.runtime(); bail_unless!( - disk.runtime.disk_state.state() - == &api::external::DiskState::Creating, + runtime.state().state() == &api::external::DiskState::Creating, "newly-created Disk has unexpected state: {:?}", - disk.runtime.disk_state + runtime.disk_state ); bail_unless!( - disk.runtime.gen == runtime_initial.gen, + runtime.gen == runtime_initial.gen, "newly-created Disk has unexpected generation: {:?}", - disk.runtime.gen + runtime.gen ); Ok(disk) } @@ -484,14 +495,21 @@ impl DataStore { &self, project_id: &Uuid, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { - let client = self.pool.acquire().await?; - sql_fetch_page_by::< - LookupByUniqueNameInProject, - Disk, - ::Model, - >(&client, (project_id,), pagparams, Disk::ALL_COLUMNS) - .await + ) -> ListResultVec { + use db::diesel_schema::disk::dsl; + + paginated(dsl::disk, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(*project_id)) + .load_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Disk, + LookupType::Other("Listing All".to_string()), + ) + }) } pub async fn disk_update_runtime( @@ -499,36 +517,49 @@ impl DataStore { disk_id: &Uuid, new_runtime: &db::model::DiskRuntimeState, ) -> Result { - let client = self.pool.acquire().await?; - - let mut values = SqlValueSet::new(); - new_runtime.sql_serialize(&mut values); - - let mut cond_sql = SqlString::new(); - let param = cond_sql.next_param(&new_runtime.gen); - cond_sql.push_str(&format!("state_generation < {}", param)); - - let update = sql_update_precond::( - &client, - (), - disk_id, - &["state_generation"], - &values, - cond_sql, - ) - .await?; - let row = &update.found_state; - let found_id: Uuid = sql_row_value(&row, "found_id")?; - bail_unless!(found_id == *disk_id); - Ok(update.updated) + use db::diesel_schema::disk::dsl; + + let updated = diesel::update(dsl::disk) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*disk_id)) 
+ .filter(dsl::state_generation.lt(new_runtime.gen)) + .set(new_runtime.clone()) + .check_if_exists(*disk_id) + .execute_and_check::(self.pool()) + .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Disk, + LookupType::ById(*disk_id), + ) + })?; + + Ok(updated) } pub async fn disk_fetch( &self, disk_id: &Uuid, ) -> LookupResult { - let client = self.pool.acquire().await?; - sql_fetch_row_by::(&client, (), disk_id).await + use db::diesel_schema::disk::dsl; + + dsl::disk + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*disk_id)) + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Disk, + LookupType::ById(*disk_id), + ) + }) } pub async fn disk_fetch_by_name( @@ -536,67 +567,55 @@ impl DataStore { project_id: &Uuid, disk_name: &Name, ) -> LookupResult { - let client = self.pool.acquire().await?; - sql_fetch_row_by::( - &client, - (project_id,), - disk_name, - ) - .await + use db::diesel_schema::disk::dsl; + + dsl::disk + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(*project_id)) + .filter(dsl::name.eq(disk_name.clone())) + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Disk, + LookupType::ByName(disk_name.as_str().to_owned()), + ) + }) } pub async fn project_delete_disk(&self, disk_id: &Uuid) -> DeleteResult { - let client = self.pool.acquire().await?; + use db::diesel_schema::disk::dsl; let now = Utc::now(); - let mut values = SqlValueSet::new(); - values.set("time_deleted", &now); - db::model::DiskState::new(api::external::DiskState::Destroyed) - .sql_serialize(&mut values); - - let mut cond_sql = SqlString::new(); - let disk_state_detached = - api::external::DiskState::Detached.to_string(); - let p1 = cond_sql.next_param(&disk_state_detached); - let disk_state_faulted = api::external::DiskState::Faulted.to_string(); - let p2 = cond_sql.next_param(&disk_state_faulted); - cond_sql.push_str(&format!("disk_state in ({}, {})", p1, p2)); - - let update = sql_update_precond::( - &client, - (), - disk_id, - &["disk_state", "attach_instance_id", "time_deleted"], - &values, - cond_sql, - ) - .await?; - - let row = &update.found_state; - let found_id: Uuid = sql_row_value(&row, "found_id")?; - bail_unless!(found_id == *disk_id); - - // TODO-cleanup It would be nice to use - // api::external::DiskState::try_from(&tokio_postgres::Row), but the column names - // are different here. 
- let disk_state_str: &str = sql_row_value(&row, "found_disk_state")?; - let attach_instance_id: Option = - sql_row_value(&row, "found_attach_instance_id")?; - let found_disk_state = api::external::DiskState::try_from(( - disk_state_str, - attach_instance_id, - )) - .map_err(|e| Error::internal_error(&e))?; - - if update.updated { - Ok(()) - } else { - Err(Error::InvalidRequest { + let destroyed = api::external::DiskState::Destroyed.label(); + let detached = api::external::DiskState::Detached.label(); + let faulted = api::external::DiskState::Faulted.label(); + + let result = diesel::update(dsl::disk) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*disk_id)) + .filter(dsl::disk_state.eq_any(vec![detached, faulted])) + .set((dsl::disk_state.eq(destroyed), dsl::time_deleted.eq(now))) + .check_if_exists(*disk_id) + .execute_and_check::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Disk, + LookupType::ById(*disk_id), + ) + })?; + + match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => Err(Error::InvalidRequest { message: format!( "disk cannot be deleted in state \"{}\"", - found_disk_state + result.found.disk_state ), - }) + }), } } @@ -605,10 +624,20 @@ impl DataStore { &self, info: &db::model::OximeterInfo, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - info.sql_serialize(&mut values); - sql_insert::(&client, &values).await + use db::diesel_schema::oximeter::dsl; + + diesel::insert_into(dsl::oximeter) + .values(*info) + .execute_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create( + e, + ResourceType::Oximeter, + "Oximeter Info", + ) + })?; + Ok(()) } // Create a record for a new producer endpoint @@ -616,10 +645,20 @@ impl DataStore { &self, producer: &db::model::ProducerEndpoint, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - producer.sql_serialize(&mut values); - sql_insert::(&client, &values).await + use db::diesel_schema::metricproducer::dsl; + + diesel::insert_into(dsl::metricproducer) + .values(producer.clone()) + .execute_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create( + e, + ResourceType::Oximeter, + "Producer Endpoint", + ) + })?; + Ok(()) } // Create a record of an assignment of a producer to a collector @@ -628,13 +667,22 @@ impl DataStore { oximeter_id: Uuid, producer_id: Uuid, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let now = Utc::now(); - let mut values = SqlValueSet::new(); - values.set("time_created", &now); - let reg = db::model::OximeterAssignment { oximeter_id, producer_id }; - reg.sql_serialize(&mut values); - sql_insert::(&client, &values).await + use db::diesel_schema::oximeterassignment::dsl; + + let assignment = + db::model::OximeterAssignment::new(oximeter_id, producer_id); + diesel::insert_into(dsl::oximeterassignment) + .values(assignment) + .execute_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create( + e, + ResourceType::Oximeter, + "Oximeter Assignment", + ) + })?; + Ok(()) } /* @@ -645,22 +693,39 @@ impl DataStore { &self, saga: &db::saga_types::Saga, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - saga.sql_serialize(&mut values); - sql_insert::(&client, &values).await + use db::diesel_schema::saga::dsl; + + let name = saga.template_name.clone(); + diesel::insert_into(dsl::saga) + 
.values(saga.clone()) + .execute_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create(e, ResourceType::SagaDbg, &name) + })?; + Ok(()) } pub async fn saga_create_event( &self, event: &db::saga_types::SagaNodeEvent, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - event.sql_serialize(&mut values); + use db::diesel_schema::saganodeevent::dsl; + // TODO-robustness This INSERT ought to be conditional on this SEC still // owning this saga. - sql_insert::(&client, &values).await + diesel::insert_into(dsl::saganodeevent) + .values(event.clone()) + .execute_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create( + e, + ResourceType::SagaDbg, + "Saga Event", + ) + })?; + Ok(()) } pub async fn saga_update_state( @@ -670,65 +735,63 @@ impl DataStore { current_sec: db::saga_types::SecId, current_adopt_generation: Generation, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - values.set("saga_state", &new_state.to_string()); - let mut precond_sql = SqlString::new(); - let p1 = precond_sql.next_param(¤t_sec); - let p2 = precond_sql.next_param(¤t_adopt_generation); - precond_sql.push_str(&format!( - "current_sec = {} AND adopt_generation = {}", - p1, p2 - )); - let update = sql_update_precond::< - schema::Saga, - schema::LookupGenericByUniqueId, - >( - &client, - (), - &saga_id.0, - &["current_sec", "adopt_generation", "saga_state"], - &values, - precond_sql, - ) - .await?; - let row = &update.found_state; - let found_sec: Option<&str> = sql_row_value(row, "found_current_sec") - .unwrap_or(Some("(unknown)")); - let found_gen = - sql_row_value::<_, Generation>(row, "found_adopt_generation") - .map(|i| i.to_string()) - .unwrap_or_else(|_| "(unknown)".to_owned()); - let found_saga_state = - sql_row_value::<_, String>(row, "found_saga_state") - .unwrap_or_else(|_| "(unknown)".to_owned()); - bail_unless!(update.updated, - "failed to update saga {:?} with state {:?}: preconditions not met: \ - expected current_sec = {:?}, adopt_generation = {:?}, \ - but found current_sec = {:?}, adopt_generation = {:?}, state = {:?}", - saga_id, - new_state, - current_sec, - current_adopt_generation, - found_sec, - found_gen, - found_saga_state, - ); - Ok(()) + use db::diesel_schema::saga::dsl; + + let saga_id: db::saga_types::SagaId = saga_id.into(); + let result = diesel::update(dsl::saga) + .filter(dsl::id.eq(saga_id)) + .filter(dsl::current_sec.eq(current_sec)) + .filter(dsl::adopt_generation.eq(current_adopt_generation)) + .set(dsl::saga_state.eq(new_state.to_string())) + .check_if_exists(saga_id) + .execute_and_check::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::SagaDbg, + LookupType::ById(saga_id.0.into()), + ) + })?; + + match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => Err(Error::InvalidRequest { + message: format!( + "failed to update saga {:?} with state {:?}: preconditions not met: \ + expected current_sec = {:?}, adopt_generation = {:?}, \ + but found current_sec = {:?}, adopt_generation = {:?}, state = {:?}", + saga_id, + new_state, + current_sec, + current_adopt_generation, + result.found.current_sec, + result.found.adopt_generation, + result.found.saga_state, + ) + }), + } } pub async fn project_list_vpcs( &self, project_id: &Uuid, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { - let client = self.pool.acquire().await?; - sql_fetch_page_by::< - 
LookupByUniqueNameInProject, - Vpc, - ::Model, - >(&client, (project_id,), pagparams, Vpc::ALL_COLUMNS) - .await + ) -> ListResultVec { + use db::diesel_schema::vpc::dsl; + + paginated(dsl::vpc, dsl::name, pagparams) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(*project_id)) + .load_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Vpc, + LookupType::Other("Listing All".to_string()), + ) + }) } pub async fn project_create_vpc( @@ -737,20 +800,20 @@ impl DataStore { project_id: &Uuid, params: &api::external::VpcCreateParams, ) -> Result { - let client = self.pool.acquire().await?; - let mut values = SqlValueSet::new(); - let vpc = db::model::Vpc::new(*vpc_id, *project_id, params.clone()); - vpc.sql_serialize(&mut values); + use db::diesel_schema::vpc::dsl; - sql_insert_unique_idempotent_and_fetch::( - &client, - &values, - params.identity.name.as_str(), - "id", - (), - &vpc_id, - ) - .await + let vpc = db::model::Vpc::new(*vpc_id, *project_id, params.clone()); + let name = vpc.name.clone(); + let vpc: db::model::Vpc = diesel::insert_into(dsl::vpc) + .values(vpc) + .on_conflict(dsl::id) + .do_nothing() + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel_create(e, ResourceType::Vpc, name.as_str()) + })?; + Ok(vpc) } pub async fn project_update_vpc( @@ -758,41 +821,22 @@ impl DataStore { vpc_id: &Uuid, params: &api::external::VpcUpdateParams, ) -> Result<(), Error> { - let client = self.pool.acquire().await?; - let now = Utc::now(); - - let mut values = SqlValueSet::new(); - values.set("time_modified", &now); - - if let Some(new_name) = ¶ms.identity.name { - values.set("name", new_name); - } - - if let Some(new_description) = ¶ms.identity.description { - values.set("description", new_description); - } - - if let Some(dns_name) = ¶ms.dns_name { - values.set("dns_name", dns_name); - } - - // dummy condition because sql_update_precond breaks otherwise - // TODO-cleanup: write sql_update that takes no preconditions? 
- let mut cond_sql = SqlString::new(); - cond_sql.push_str("true"); - - sql_update_precond::( - &client, - (), - vpc_id, - &[], - &values, - cond_sql, - ) - .await?; - - // TODO-correctness figure out how to get sql_update_precond to return - // the whole row + use db::diesel_schema::vpc::dsl; + let updates: db::model::VpcUpdate = params.clone().into(); + + diesel::update(dsl::vpc) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*vpc_id)) + .set(updates) + .execute_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Vpc, + LookupType::ById(*vpc_id), + ) + })?; Ok(()) } @@ -801,29 +845,40 @@ impl DataStore { project_id: &Uuid, vpc_name: &Name, ) -> LookupResult { - let client = self.pool.acquire().await?; - sql_fetch_row_by::( - &client, - (project_id,), - vpc_name, - ) - .await + use db::diesel_schema::vpc::dsl; + + dsl::vpc + .filter(dsl::time_deleted.is_null()) + .filter(dsl::project_id.eq(*project_id)) + .filter(dsl::name.eq(vpc_name.clone())) + .get_result_async(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Vpc, + LookupType::ByName(vpc_name.as_str().to_owned()), + ) + }) } pub async fn project_delete_vpc(&self, vpc_id: &Uuid) -> DeleteResult { - let client = self.pool.acquire().await?; + use db::diesel_schema::vpc::dsl; + let now = Utc::now(); - sql_execute_maybe_one( - &client, - format!( - "UPDATE {} SET time_deleted = $1 WHERE \ - time_deleted IS NULL AND id = $2", - Vpc::TABLE_NAME, - ) - .as_str(), - &[&now, &vpc_id], - || Error::not_found_by_id(ResourceType::Vpc, vpc_id), - ) - .await + diesel::update(dsl::vpc) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(*vpc_id)) + .set(dsl::time_deleted.eq(now)) + .get_result_async::(self.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::Vpc, + LookupType::ById(*vpc_id), + ) + })?; + Ok(()) } } diff --git a/omicron-nexus/src/db/diesel_schema.rs b/omicron-nexus/src/db/diesel_schema.rs new file mode 100644 index 0000000000..0f5e6e4041 --- /dev/null +++ b/omicron-nexus/src/db/diesel_schema.rs @@ -0,0 +1,172 @@ +//! Describes the Diesel database schema. +//! +//! NOTE: Should be kept up-to-date with dbinit.sql. + +table! { + disk (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + project_id -> Uuid, + disk_state -> Text, + attach_instance_id -> Nullable, + state_generation -> Int8, + time_state_updated -> Timestamptz, + size_bytes -> Int8, + origin_snapshot -> Nullable, + } +} + +table! { + instance (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + project_id -> Uuid, + state -> Text, + time_state_updated -> Timestamptz, + state_generation -> Int8, + active_server_id -> Uuid, + ncpus -> Int8, + memory -> Int8, + hostname -> Text, + } +} + +table! { + metricproducer (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + ip -> Inet, + port -> Int4, + interval -> Float8, + base_route -> Nullable, + } +} + +table! { + networkinterface (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + vpc_id -> Uuid, + subnet_id -> Uuid, + mac -> Text, + ip -> Inet, + } +} + +table! 
{ + oximeter (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + ip -> Inet, + port -> Int4, + } +} + +table! { + oximeterassignment (oximeter_id, producer_id) { + oximeter_id -> Uuid, + producer_id -> Uuid, + time_created -> Timestamptz, + } +} + +table! { + project (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + } +} + +table! { + saga (id) { + id -> Uuid, + creator -> Uuid, + template_name -> Text, + time_created -> Timestamptz, + saga_params -> Jsonb, + saga_state -> Text, + current_sec -> Nullable, + adopt_generation -> Int8, + adopt_time -> Timestamptz, + } +} + +table! { + saganodeevent (saga_id, node_id, event_type) { + saga_id -> Uuid, + node_id -> Int8, + event_type -> Text, + data -> Nullable, + event_time -> Timestamptz, + creator -> Uuid, + } +} + +table! { + sled (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + sled_agent_ip -> Nullable, + } +} + +table! { + vpc (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + project_id -> Uuid, + dns_name -> Text, + } +} + +table! { + vpcsubnet (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + vpc_id -> Uuid, + ipv4_block -> Nullable, + ipv6_block -> Nullable, + } +} + +allow_tables_to_appear_in_same_query!( + disk, + instance, + metricproducer, + networkinterface, + oximeter, + oximeterassignment, + project, + saga, + saganodeevent, + sled, + vpc, + vpcsubnet, +); diff --git a/omicron-nexus/src/db/mod.rs b/omicron-nexus/src/db/mod.rs index b32ffc063f..3f748f003b 100644 --- a/omicron-nexus/src/db/mod.rs +++ b/omicron-nexus/src/db/mod.rs @@ -4,16 +4,15 @@ mod config; mod datastore; -mod operations; +mod pagination; mod pool; mod saga_recovery; mod saga_types; mod sec_store; -mod sql_operations; +mod update_and_check; +pub mod diesel_schema; pub mod model; -pub mod schema; -pub mod sql; /* public for examples only */ pub use config::Config; pub use datastore::DataStore; @@ -21,8 +20,3 @@ pub use pool::Pool; pub use saga_recovery::recover; pub use saga_types::SecId; pub use sec_store::CockroachDbSecStore; - -/* These are exposed only so that we can write examples that use them. */ -pub use sql::where_cond; -pub use sql::SqlString; -pub use sql::SqlValueSet; diff --git a/omicron-nexus/src/db/model.rs b/omicron-nexus/src/db/model.rs index b80e96cd75..d327f0f9a1 100644 --- a/omicron-nexus/src/db/model.rs +++ b/omicron-nexus/src/db/model.rs @@ -1,19 +1,23 @@ //! Structures stored to the database. +use super::diesel_schema::{ + disk, instance, metricproducer, networkinterface, oximeter, + oximeterassignment, project, vpc, vpcsubnet, +}; use chrono::{DateTime, Utc}; +use diesel::backend::{Backend, RawValue}; +use diesel::deserialize::{self, FromSql}; +use diesel::serialize::{self, ToSql}; +use diesel::sql_types; use omicron_common::api::external::{ self, ByteCount, Error, Generation, InstanceCpuCount, }; use omicron_common::api::internal; use omicron_common::db::sql_row_value; use std::convert::TryFrom; -use std::net::{IpAddr, SocketAddr}; -use std::time::Duration; +use std::net::SocketAddr; use uuid::Uuid; -use super::sql::SqlSerialize; -use super::sql::SqlValueSet; - // TODO: Break up types into multiple files // NOTE: This object is not currently stored in the database. 
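The structs in this file derive Diesel traits against the `table!` definitions above, and the datastore queries tie the two together. As a sketch (a hypothetical helper, mirroring the fetch-by-name pattern used for VPCs earlier in this patch), a soft-delete-aware lookup of a project by id could look like:

    async fn project_fetch_by_id(
        pool: &db::Pool,
        project_id: &uuid::Uuid,
    ) -> Result<db::model::Project, omicron_common::api::external::Error> {
        use async_bb8_diesel::AsyncRunQueryDsl;
        use db::diesel_schema::project::dsl;
        use diesel::{ExpressionMethods, QueryDsl};
        use omicron_common::api::external::{Error, LookupType, ResourceType};

        dsl::project
            // Soft-deleted rows stay in the table; every read filters them out.
            .filter(dsl::time_deleted.is_null())
            .filter(dsl::id.eq(*project_id))
            .get_result_async(pool.pool())
            .await
            .map_err(|e| {
                Error::from_diesel(
                    e,
                    ResourceType::Project,
                    LookupType::ById(*project_id),
                )
            })
    }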
@@ -48,6 +52,8 @@ impl Into for Sled { } } +// TODO: As Diesel needs to flatten things out, this structure +// may become unused. #[derive(Clone, Debug)] pub struct IdentityMetadata { pub id: Uuid, @@ -55,6 +61,7 @@ pub struct IdentityMetadata { pub description: String, pub time_created: DateTime, pub time_modified: DateTime, + pub time_deleted: Option>, } impl IdentityMetadata { @@ -66,6 +73,7 @@ impl IdentityMetadata { description: params.description, time_created: now, time_modified: now, + time_deleted: None, } } } @@ -90,24 +98,11 @@ impl From for IdentityMetadata { description: metadata.description, time_created: metadata.time_created, time_modified: metadata.time_modified, + time_deleted: None, } } } -/// Serialization to DB. -impl SqlSerialize for IdentityMetadata { - fn sql_serialize(&self, output: &mut SqlValueSet) { - output.set("id", &self.id); - output.set("name", &self.name); - output.set("description", &self.description); - output.set("time_created", &self.time_created); - output.set("time_modified", &self.time_modified); - - // TODO: Is this right? When should this be set? - output.set("time_deleted", &(None as Option>)); - } -} - /// Deserialization from the DB. impl TryFrom<&tokio_postgres::Row> for IdentityMetadata { type Error = Error; @@ -132,77 +127,128 @@ impl TryFrom<&tokio_postgres::Row> for IdentityMetadata { description: sql_row_value(value, "description")?, time_created: sql_row_value(value, "time_created")?, time_modified: sql_row_value(value, "time_modified")?, + time_deleted: sql_row_value(value, "time_deleted")?, }) } } /// Describes a project within the database. +#[derive(Queryable, Identifiable, Insertable, Debug)] +#[table_name = "project"] pub struct Project { - identity: IdentityMetadata, + pub id: Uuid, + pub name: external::Name, + pub description: String, + pub time_created: DateTime, + pub time_modified: DateTime, + pub time_deleted: Option>, } impl Project { /// Creates a new database Project object. - pub fn new(params: &external::ProjectCreateParams) -> Self { + pub fn new(params: external::ProjectCreateParams) -> Self { let id = Uuid::new_v4(); - Self { identity: IdentityMetadata::new(id, params.identity.clone()) } + let identity = IdentityMetadata::new(id, params.identity); + Self { + id: identity.id, + name: identity.name, + description: identity.description, + time_created: identity.time_created, + time_modified: identity.time_modified, + time_deleted: identity.time_deleted, + } } pub fn name(&self) -> &str { - self.identity.name.as_str() + self.name.as_str() } pub fn id(&self) -> &Uuid { - &self.identity.id + &self.id } } -/// Conversion to the internal API type. impl Into for Project { fn into(self) -> external::Project { - external::Project { identity: self.identity.into() } + external::Project { + identity: external::IdentityMetadata { + id: self.id, + name: self.name, + description: self.description, + time_created: self.time_created, + time_modified: self.time_modified, + }, + } } } /// Conversion from the internal API type. impl From for Project { fn from(project: external::Project) -> Self { - Self { identity: project.identity.into() } + Self { + id: project.identity.id, + name: project.identity.name, + description: project.identity.description, + time_created: project.identity.time_created, + time_modified: project.identity.time_modified, + time_deleted: None, + } } } -/// Serialization to DB. 
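Project updates go through the `ProjectUpdate` changeset introduced just below. A short illustrative sketch (values are made up) of the partial-update semantics it inherits from `AsChangeset`:

    // Illustrative only: with a plain AsChangeset derive (no treat_none_as_null),
    // fields that are None are dropped from the SET clause entirely, so this
    // updates only description and time_modified and leaves name untouched.
    let updates = db::model::ProjectUpdate {
        name: None,
        description: Some("updated description".to_string()),
        time_modified: chrono::Utc::now(),
    };
    // Applied as: diesel::update(dsl::project).filter(...).set(updates)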
-impl SqlSerialize for Project { - fn sql_serialize(&self, output: &mut SqlValueSet) { - self.identity.sql_serialize(output); - } +/// Describes a set of updates for the [`Project`] model. +#[derive(AsChangeset)] +#[table_name = "project"] +pub struct ProjectUpdate { + pub name: Option, + pub description: Option, + pub time_modified: DateTime, } -/// Deserialization from DB. -impl TryFrom<&tokio_postgres::Row> for Project { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(Project { identity: IdentityMetadata::try_from(value)? }) +impl From for ProjectUpdate { + fn from(params: external::ProjectUpdateParams) -> Self { + Self { + name: params.identity.name, + description: params.identity.description, + time_modified: Utc::now(), + } } } /// An Instance (VM). -#[derive(Clone, Debug)] +#[derive(Queryable, Identifiable, Insertable, Debug)] +#[table_name = "instance"] pub struct Instance { - /// common identifying metadata - pub identity: IdentityMetadata, + pub id: Uuid, + pub name: external::Name, + pub description: String, + pub time_created: DateTime, + pub time_modified: DateTime, + pub time_deleted: Option>, /// id for the project containing this Instance pub project_id: Uuid, - /// state owned by the data plane - pub runtime: InstanceRuntimeState, + /// runtime state of the Instance + pub state: InstanceState, + + /// timestamp for this information + // TODO: Is this redundant with "time_modified"? + pub time_state_updated: DateTime, + + /// generation number for this state + pub state_generation: Generation, + + /// which sled is running this Instance + pub active_server_id: Uuid, + // TODO-completeness: add disks, network, tags, metrics /// number of CPUs allocated for this Instance pub ncpus: InstanceCpuCount, + /// memory allocated for this Instance pub memory: ByteCount, + /// RFC1035-compliant hostname for the Instance. // TODO-cleanup different type? pub hostname: String, @@ -215,16 +261,50 @@ impl Instance { params: &external::InstanceCreateParams, runtime: InstanceRuntimeState, ) -> Self { + let identity = + IdentityMetadata::new(instance_id, params.identity.clone()); + Self { - identity: IdentityMetadata::new( - instance_id, - params.identity.clone(), - ), + id: identity.id, + name: identity.name, + description: identity.description, + time_created: identity.time_created, + time_modified: identity.time_modified, + time_deleted: identity.time_deleted, + project_id, + + state: runtime.state, + time_state_updated: runtime.time_updated, + state_generation: runtime.gen, + active_server_id: runtime.sled_uuid, + ncpus: params.ncpus, memory: params.memory, hostname: params.hostname.clone(), - runtime, + } + } + + // TODO: We could definitely derive this. + // We could actually derive any "into subset" struct with + // identically named fields. 
+ pub fn identity(&self) -> IdentityMetadata { + IdentityMetadata { + id: self.id, + name: self.name.clone(), + description: self.description.clone(), + time_created: self.time_created, + time_modified: self.time_modified, + time_deleted: self.time_deleted, + } + } + + pub fn runtime(&self) -> InstanceRuntimeState { + InstanceRuntimeState { + state: self.state, + sled_uuid: self.active_server_id, + gen: self.state_generation, + time_updated: self.time_state_updated, } } } @@ -233,57 +313,35 @@ impl Instance { impl Into for Instance { fn into(self) -> external::Instance { external::Instance { - identity: self.identity.clone().into(), + identity: self.identity().into(), project_id: self.project_id, ncpus: self.ncpus, memory: self.memory, hostname: self.hostname.clone(), - runtime: self.runtime.into(), + runtime: self.runtime().into(), } } } -/// Serialization to DB. -impl SqlSerialize for Instance { - fn sql_serialize(&self, output: &mut SqlValueSet) { - self.identity.sql_serialize(output); - output.set("project_id", &self.project_id); - self.runtime.sql_serialize(output); - output.set("ncpus", &self.ncpus); - output.set("memory", &self.memory); - output.set("hostname", &self.hostname); - } -} - -/// Deserialization from DB. -impl TryFrom<&tokio_postgres::Row> for Instance { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(Instance { - identity: IdentityMetadata::try_from(value)?, - project_id: sql_row_value(value, "project_id")?, - ncpus: sql_row_value(value, "ncpus")?, - memory: sql_row_value(value, "memory")?, - hostname: sql_row_value(value, "hostname")?, - runtime: InstanceRuntimeState::try_from(value)?, - }) - } -} - /// Runtime state of the Instance, including the actual running state and minimal /// metadata /// /// This state is owned by the sled agent running that Instance. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, AsChangeset)] +#[table_name = "instance"] pub struct InstanceRuntimeState { /// runtime state of the Instance - pub run_state: InstanceState, + #[column_name = "state"] + pub state: InstanceState, /// which sled is running this Instance + // TODO: should this be optional? + #[column_name = "active_server_id"] pub sled_uuid: Uuid, /// generation number for this state + #[column_name = "state_generation"] pub gen: Generation, /// timestamp for this information + #[column_name = "time_state_updated"] pub time_updated: DateTime, } @@ -291,7 +349,7 @@ pub struct InstanceRuntimeState { impl Into for InstanceRuntimeState { fn into(self) -> external::InstanceRuntimeState { external::InstanceRuntimeState { - run_state: self.run_state.0, + run_state: *self.state.state(), time_run_state_updated: self.time_updated, } } @@ -301,7 +359,7 @@ impl Into for InstanceRuntimeState { impl From for InstanceRuntimeState { fn from(state: internal::nexus::InstanceRuntimeState) -> Self { Self { - run_state: InstanceState(state.run_state), + state: InstanceState::new(state.run_state), sled_uuid: state.sled_uuid, gen: state.gen, time_updated: state.time_updated, @@ -313,7 +371,7 @@ impl From for InstanceRuntimeState { impl Into for InstanceRuntimeState { fn into(self) -> internal::nexus::InstanceRuntimeState { internal::sled_agent::InstanceRuntimeState { - run_state: self.run_state.0, + run_state: *self.state.state(), sled_uuid: self.sled_uuid, gen: self.gen, time_updated: self.time_updated, @@ -321,33 +379,10 @@ impl Into for InstanceRuntimeState { } } -/// Serialization to the database. 
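Because `InstanceRuntimeState` derives `AsChangeset` with explicit `column_name` mappings, persisting a state report from the sled agent can be sketched as a single UPDATE. This is a hypothetical helper (not part of this hunk); a real implementation would likely also guard on `state_generation`:

    async fn instance_update_runtime(
        pool: &db::Pool,
        instance_id: &uuid::Uuid,
        new_runtime: db::model::InstanceRuntimeState,
    ) -> Result<(), omicron_common::api::external::Error> {
        use async_bb8_diesel::AsyncRunQueryDsl;
        use db::diesel_schema::instance::dsl;
        use diesel::{ExpressionMethods, QueryDsl};
        use omicron_common::api::external::{Error, LookupType, ResourceType};

        diesel::update(dsl::instance)
            .filter(dsl::time_deleted.is_null())
            .filter(dsl::id.eq(*instance_id))
            // Expands to SET state = ..., active_server_id = ...,
            // state_generation = ..., time_state_updated = ...
            .set(new_runtime)
            .execute_async(pool.pool())
            .await
            .map_err(|e| {
                Error::from_diesel(
                    e,
                    ResourceType::Instance,
                    LookupType::ById(*instance_id),
                )
            })?;
        Ok(())
    }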
-impl SqlSerialize for InstanceRuntimeState { - fn sql_serialize(&self, output: &mut SqlValueSet) { - self.run_state.sql_serialize(output); - output.set("active_server_id", &self.sled_uuid); - output.set("state_generation", &self.gen); - output.set("time_state_updated", &self.time_updated); - } -} - -/// Deserialization from the database. -impl TryFrom<&tokio_postgres::Row> for InstanceRuntimeState { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(InstanceRuntimeState { - run_state: InstanceState::try_from(value)?, - sled_uuid: sql_row_value(value, "active_server_id")?, - gen: sql_row_value(value, "state_generation")?, - time_updated: sql_row_value(value, "time_state_updated")?, - }) - } -} - /// A wrapper around the external "InstanceState" object, /// which may be stored to disk. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, AsExpression, FromSqlRow)] +#[sql_type = "sql_types::Text"] pub struct InstanceState(external::InstanceState); impl InstanceState { @@ -360,40 +395,64 @@ impl InstanceState { } } -/// Serialization to the database. -impl SqlSerialize for InstanceState { - fn sql_serialize(&self, output: &mut SqlValueSet) { - output.set("instance_state", &self.0.label()); +impl ToSql for InstanceState +where + DB: Backend, + String: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + (&self.0.label().to_string() as &String).to_sql(out) } } -/// Deserialization from the database. -impl TryFrom<&tokio_postgres::Row> for InstanceState { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - let variant: &str = sql_row_value(value, "instance_state")?; - Ok(InstanceState( - external::InstanceState::try_from(variant) - .map_err(|err| Error::InternalError { message: err })?, - )) +impl FromSql for InstanceState +where + DB: Backend, + String: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + let s = String::from_sql(bytes)?; + let state = external::InstanceState::try_from(s.as_str())?; + Ok(InstanceState::new(state)) } } /// A Disk (network block device). -#[derive(Clone, Debug)] +#[derive(Queryable, Identifiable, Insertable, Clone, Debug)] +#[table_name = "disk"] pub struct Disk { - /// common identifying metadata. 
- pub identity: IdentityMetadata, + // IdentityMetadata + pub id: Uuid, + pub name: external::Name, + pub description: String, + pub time_created: DateTime, + pub time_modified: DateTime, + pub time_deleted: Option>, + /// id for the project containing this Disk pub project_id: Uuid, + + // DiskRuntimeState + /// runtime state of the Disk + pub disk_state: String, + pub attach_instance_id: Option, + /// generation number for this state + #[column_name = "state_generation"] + pub gen: Generation, + /// timestamp for this information + #[column_name = "time_state_updated"] + pub time_updated: DateTime, + + /// size of the Disk + #[column_name = "size_bytes"] + pub size: ByteCount, /// id for the snapshot from which this Disk was created (None means a blank /// disk) + #[column_name = "origin_snapshot"] pub create_snapshot_id: Option, - /// size of the Disk - pub size: ByteCount, - /// runtime state of the Disk - pub runtime: DiskRuntimeState, } impl Disk { @@ -403,12 +462,71 @@ impl Disk { params: external::DiskCreateParams, runtime_initial: DiskRuntimeState, ) -> Self { + let identity = IdentityMetadata::new(disk_id, params.identity); Self { - identity: IdentityMetadata::new(disk_id, params.identity), + id: identity.id, + name: identity.name, + description: identity.description, + time_created: identity.time_created, + time_modified: identity.time_modified, + time_deleted: identity.time_deleted, + project_id, - create_snapshot_id: params.snapshot_id, + + disk_state: runtime_initial.disk_state, + attach_instance_id: runtime_initial.attach_instance_id, + gen: runtime_initial.gen, + time_updated: runtime_initial.time_updated, + size: params.size, - runtime: runtime_initial, + create_snapshot_id: params.snapshot_id, + } + } + + pub fn identity(&self) -> IdentityMetadata { + IdentityMetadata { + id: self.id, + name: self.name.clone(), + description: self.description.clone(), + time_created: self.time_created, + time_modified: self.time_modified, + time_deleted: self.time_deleted, + } + } + + pub fn state(&self) -> DiskState { + // TODO: If we could store disk state in-line, we could avoid the + // unwrap. Would prefer to parse it as such. + // + // TODO: also impl'd for DiskRuntimeState + DiskState::new( + external::DiskState::try_from(( + self.disk_state.as_str(), + self.attach_instance_id, + )) + .unwrap(), + ) + } + + pub fn runtime(&self) -> DiskRuntimeState { + DiskRuntimeState { + disk_state: self.disk_state.clone(), + attach_instance_id: self.attach_instance_id, + gen: self.gen, + time_updated: self.time_updated, + } + } + + pub fn attachment(&self) -> Option { + if let Some(instance_id) = self.attach_instance_id { + Some(DiskAttachment { + instance_id, + disk_id: self.id, + disk_name: self.name.clone(), + disk_state: self.state().into(), + }) + } else { + None } } } @@ -416,59 +534,76 @@ impl Disk { /// Conversion to the external API type. impl Into for Disk { fn into(self) -> external::Disk { - let device_path = format!("/mnt/{}", self.identity.name.as_str()); + let device_path = format!("/mnt/{}", self.name.as_str()); external::Disk { - identity: self.identity.clone().into(), + identity: self.identity().into(), project_id: self.project_id, snapshot_id: self.create_snapshot_id, size: self.size, - state: self.runtime.disk_state.into(), + state: self.state().into(), device_path, } } } -/// Serialization to the DB. 
-impl SqlSerialize for Disk { - fn sql_serialize(&self, output: &mut SqlValueSet) { - self.identity.sql_serialize(output); - output.set("project_id", &self.project_id); - self.runtime.sql_serialize(output); - output.set("size_bytes", &self.size); - output.set("origin_snapshot", &self.create_snapshot_id); - } -} - -/// Deserialization from the DB. -impl TryFrom<&tokio_postgres::Row> for Disk { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(Disk { - identity: IdentityMetadata::try_from(value)?, - project_id: sql_row_value(value, "project_id")?, - create_snapshot_id: sql_row_value(value, "origin_snapshot")?, - size: sql_row_value(value, "size_bytes")?, - runtime: DiskRuntimeState::try_from(value)?, - }) - } -} - -#[derive(Clone, Debug)] +#[derive(AsChangeset, Clone, Debug)] +#[table_name = "disk"] +// When "attach_instance_id" is set to None, we'd like to +// clear it from the DB, rather than ignore the update. +#[changeset_options(treat_none_as_null = "true")] pub struct DiskRuntimeState { /// runtime state of the Disk - pub disk_state: DiskState, + pub disk_state: String, + pub attach_instance_id: Option, /// generation number for this state + #[column_name = "state_generation"] pub gen: Generation, /// timestamp for this information + #[column_name = "time_state_updated"] pub time_updated: DateTime, } +impl DiskRuntimeState { + pub fn new() -> Self { + Self { + disk_state: external::DiskState::Creating.label().to_string(), + attach_instance_id: None, + gen: Generation::new(), + time_updated: Utc::now(), + } + } + + pub fn detach(self) -> Self { + Self { + disk_state: external::DiskState::Detached.label().to_string(), + attach_instance_id: None, + gen: self.gen.next(), + time_updated: Utc::now(), + } + } + + pub fn state(&self) -> DiskState { + // TODO: If we could store disk state in-line, we could avoid the + // unwrap. Would prefer to parse it as such. + DiskState::new( + external::DiskState::try_from(( + self.disk_state.as_str(), + self.attach_instance_id, + )) + .unwrap(), + ) + } +} + /// Conversion from the internal API type. impl From for DiskRuntimeState { fn from(runtime: internal::nexus::DiskRuntimeState) -> Self { Self { - disk_state: runtime.disk_state.into(), + disk_state: runtime.disk_state.label().to_string(), + attach_instance_id: runtime + .disk_state + .attached_instance_id() + .map(|id| *id), gen: runtime.gen, time_updated: runtime.time_updated, } @@ -479,36 +614,14 @@ impl From for DiskRuntimeState { impl Into for DiskRuntimeState { fn into(self) -> internal::nexus::DiskRuntimeState { internal::nexus::DiskRuntimeState { - disk_state: self.disk_state.into(), + disk_state: self.state().into(), gen: self.gen, time_updated: self.time_updated, } } } -/// Serialization to the DB. -impl SqlSerialize for DiskRuntimeState { - fn sql_serialize(&self, output: &mut SqlValueSet) { - self.disk_state.sql_serialize(output); - output.set("state_generation", &self.gen); - output.set("time_state_updated", &self.time_updated); - } -} - -/// Deserialization from the DB. 
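The `treat_none_as_null` option above is what makes detach work: by default `AsChangeset` skips `None` fields, but a detach has to actively clear `attach_instance_id`. A small illustrative check:

    // Illustrative only. detach() produces a changeset whose attach_instance_id
    // is None; with treat_none_as_null, applying it via
    // diesel::update(...).set(...) emits SET attach_instance_id = NULL instead
    // of omitting the column.
    let detached = db::model::DiskRuntimeState::new().detach();
    assert_eq!(
        detached.disk_state,
        omicron_common::api::external::DiskState::Detached.label().to_string()
    );
    assert!(detached.attach_instance_id.is_none());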
-impl TryFrom<&tokio_postgres::Row> for DiskRuntimeState { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(DiskRuntimeState { - disk_state: DiskState::try_from(value)?, - gen: sql_row_value(value, "state_generation")?, - time_updated: sql_row_value(value, "time_state_updated")?, - }) - } -} - -#[derive(Clone, Debug)] +#[derive(Clone, Debug, AsExpression)] pub struct DiskState(external::DiskState); impl DiskState { @@ -542,72 +655,24 @@ impl Into for DiskState { } } -/// Serialization to the DB. -impl SqlSerialize for DiskState { - fn sql_serialize(&self, output: &mut SqlValueSet) { - let attach_id = &self.0.attached_instance_id().map(|id| *id); - output.set("attach_instance_id", attach_id); - output.set("disk_state", &self.0.label()); - } -} - -/// Deserialization from the DB. -impl TryFrom<&tokio_postgres::Row> for DiskState { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - let disk_state_str: &str = sql_row_value(value, "disk_state")?; - let instance_uuid: Option = - sql_row_value(value, "attach_instance_id")?; - Ok(DiskState( - external::DiskState::try_from((disk_state_str, instance_uuid)) - .map_err(|e| Error::internal_error(&e))?, - )) - } -} - -#[derive(Clone, Debug)] -pub struct DiskAttachment { - pub instance_id: Uuid, - pub disk_id: Uuid, - pub disk_name: external::Name, - pub disk_state: DiskState, -} - -impl Into for DiskAttachment { - fn into(self) -> external::DiskAttachment { - external::DiskAttachment { - instance_id: self.instance_id, - disk_id: self.disk_id, - disk_name: self.disk_name, - disk_state: self.disk_state.into(), - } - } -} - -impl TryFrom<&tokio_postgres::Row> for DiskAttachment { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(DiskAttachment { - instance_id: sql_row_value(value, "attach_instance_id")?, - disk_id: sql_row_value(value, "id")?, - disk_name: sql_row_value(value, "name")?, - disk_state: DiskState::try_from(value)?, - }) - } -} +/// Type which describes the attachment status of a disk. +/// +/// This happens to be the same as the type in the external API, +/// but it is not required to be. +pub type DiskAttachment = external::DiskAttachment; /// Information announced by a metric server, used so that clients can contact it and collect /// available metric data from it. 
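The `ProducerEndpoint` row below stores the producer's listening address as a separate INET column plus an `i32` port (see its `new()`), so callers that want a `SocketAddr` back have to reassemble it. A hypothetical accessor, not part of this change:

    impl ProducerEndpoint {
        /// Rebuilds the socket address from the stored ip/port columns.
        pub fn address(&self) -> std::net::SocketAddr {
            // self.ip is an ipnetwork::IpNetwork; ip() yields the bare IpAddr.
            std::net::SocketAddr::new(self.ip.ip(), self.port as u16)
        }
    }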
-#[derive(Debug, Clone)] +#[derive(Queryable, Identifiable, Insertable, Debug, Clone)] +#[table_name = "metricproducer"] pub struct ProducerEndpoint { pub id: Uuid, pub time_created: DateTime, pub time_modified: DateTime, - pub address: SocketAddr, + pub ip: ipnetwork::IpNetwork, + pub port: i32, + pub interval: f64, pub base_route: String, - pub interval: Duration, } impl ProducerEndpoint { @@ -617,9 +682,10 @@ impl ProducerEndpoint { id: endpoint.id, time_created: now, time_modified: now, - address: endpoint.address, + ip: endpoint.address.ip().into(), + port: endpoint.address.port().into(), base_route: endpoint.base_route.clone(), - interval: endpoint.interval, + interval: endpoint.interval.as_secs_f64(), } } @@ -629,120 +695,59 @@ impl ProducerEndpoint { } } -impl SqlSerialize for ProducerEndpoint { - fn sql_serialize(&self, output: &mut SqlValueSet) { - output.set("id", &self.id); - output.set("time_created", &self.time_created); - output.set("time_modified", &self.time_modified); - output.set("ip", &self.address.ip()); - output.set("port", &i32::from(self.address.port())); - output.set("interval", &self.interval.as_secs_f64()); - output.set("route", &self.base_route); - } -} - -impl TryFrom<&tokio_postgres::Row> for ProducerEndpoint { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - let id: Uuid = sql_row_value(value, "id")?; - let time_created: DateTime = sql_row_value(value, "time_created")?; - let time_modified: DateTime = - sql_row_value(value, "time_modified")?; - let ip: IpAddr = sql_row_value(value, "ip")?; - let port: i32 = sql_row_value(value, "port")?; - let address = SocketAddr::new(ip, port as _); - let base_route: String = sql_row_value(value, "route")?; - let interval = - Duration::from_secs_f64(sql_row_value(value, "interval")?); - Ok(Self { - id, - time_created, - time_modified, - address, - base_route, - interval, - }) - } -} - /// Message used to notify Nexus that this oximeter instance is up and running. -#[derive(Debug, Clone, Copy)] +#[derive(Queryable, Identifiable, Insertable, Debug, Clone, Copy)] +#[table_name = "oximeter"] pub struct OximeterInfo { /// The ID for this oximeter instance. - pub collector_id: Uuid, + pub id: Uuid, /// When this resource was created. pub time_created: DateTime, /// When this resource was last modified. 
pub time_modified: DateTime, /// The address on which this oximeter instance listens for requests - pub address: SocketAddr, + pub ip: ipnetwork::IpNetwork, + pub port: i32, } impl OximeterInfo { pub fn new(info: &internal::nexus::OximeterInfo) -> Self { let now = Utc::now(); Self { - collector_id: info.collector_id, + id: info.collector_id, time_created: now, time_modified: now, - address: info.address, + ip: info.address.ip().into(), + port: info.address.port().into(), } } } -impl SqlSerialize for OximeterInfo { - fn sql_serialize(&self, output: &mut SqlValueSet) { - output.set("id", &self.collector_id); - output.set("time_created", &self.time_created); - output.set("time_modified", &self.time_modified); - output.set("ip", &self.address.ip()); - output.set("port", &i32::from(self.address.port())); - } -} - -impl TryFrom<&tokio_postgres::Row> for OximeterInfo { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - let collector_id: Uuid = sql_row_value(value, "id")?; - let ip: IpAddr = sql_row_value(value, "ip")?; - let port: i32 = sql_row_value(value, "port")?; - let time_created: DateTime = sql_row_value(value, "time_created")?; - let time_modified: DateTime = - sql_row_value(value, "time_modified")?; - let address = SocketAddr::new(ip, port as _); - Ok(Self { collector_id, time_created, time_modified, address }) - } -} - /// An assignment of an Oximeter instance to a metric producer for collection. -#[derive(Debug, Clone, Copy)] +#[derive(Queryable, Insertable, Debug, Clone, Copy)] +#[table_name = "oximeterassignment"] pub struct OximeterAssignment { pub oximeter_id: Uuid, pub producer_id: Uuid, + pub time_created: DateTime, } -impl SqlSerialize for OximeterAssignment { - fn sql_serialize(&self, output: &mut SqlValueSet) { - output.set("oximeter_id", &self.oximeter_id); - output.set("producer_id", &self.producer_id); +impl OximeterAssignment { + pub fn new(oximeter_id: Uuid, producer_id: Uuid) -> Self { + Self { oximeter_id, producer_id, time_created: Utc::now() } } } -impl TryFrom<&tokio_postgres::Row> for OximeterAssignment { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - let oximeter_id: Uuid = sql_row_value(value, "oximeter_id")?; - let producer_id: Uuid = sql_row_value(value, "producer_id")?; - Ok(Self { oximeter_id, producer_id }) - } -} - -#[derive(Clone, Debug)] +#[derive(Queryable, Identifiable, Insertable, Clone, Debug)] +#[table_name = "vpc"] pub struct Vpc { - pub identity: IdentityMetadata, + pub id: Uuid, + pub name: external::Name, + pub description: String, + pub time_created: DateTime, + pub time_modified: DateTime, + pub time_deleted: Option>, + pub project_id: Uuid, pub dns_name: external::Name, } @@ -753,18 +758,36 @@ impl Vpc { project_id: Uuid, params: external::VpcCreateParams, ) -> Self { + let identity = IdentityMetadata::new(vpc_id, params.identity); Self { - identity: IdentityMetadata::new(vpc_id, params.identity), + id: identity.id, + name: identity.name, + description: identity.description, + time_created: identity.time_created, + time_modified: identity.time_modified, + time_deleted: identity.time_deleted, + project_id, dns_name: params.dns_name, } } + + pub fn identity(&self) -> IdentityMetadata { + IdentityMetadata { + id: self.id, + name: self.name.clone(), + description: self.description.clone(), + time_created: self.time_created, + time_modified: self.time_modified, + time_deleted: self.time_deleted, + } + } } impl Into for Vpc { fn into(self) -> external::Vpc { external::Vpc { - identity: 
self.identity.into(), + identity: self.identity().into(), project_id: self.project_id, dns_name: self.dns_name, // VPC subnets are accessed through a separate row lookup. @@ -773,66 +796,79 @@ impl Into for Vpc { } } -impl TryFrom<&tokio_postgres::Row> for Vpc { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(Self { - identity: IdentityMetadata::try_from(value)?, - project_id: sql_row_value(value, "project_id")?, - dns_name: sql_row_value(value, "dns_name")?, - }) - } +#[derive(AsChangeset)] +#[table_name = "vpc"] +pub struct VpcUpdate { + pub name: Option, + pub description: Option, + pub time_modified: DateTime, + pub dns_name: Option, } -impl SqlSerialize for Vpc { - fn sql_serialize(&self, output: &mut SqlValueSet) { - self.identity.sql_serialize(output); - output.set("project_id", &self.project_id); - output.set("dns_name", &self.dns_name); +impl From for VpcUpdate { + fn from(params: external::VpcUpdateParams) -> Self { + Self { + name: params.identity.name, + description: params.identity.description, + time_modified: Utc::now(), + dns_name: params.dns_name, + } } } -#[derive(Clone, Debug)] +#[derive(Queryable, Identifiable, Insertable, Clone, Debug)] +#[table_name = "vpcsubnet"] pub struct VpcSubnet { - pub identity: IdentityMetadata, + pub id: Uuid, + pub name: external::Name, + pub description: String, + pub time_created: DateTime, + pub time_modified: DateTime, + pub time_deleted: Option>, + pub vpc_id: Uuid, - pub ipv4_block: Option, - pub ipv6_block: Option, + pub ipv4_block: Option, + pub ipv6_block: Option, } -impl TryFrom<&tokio_postgres::Row> for VpcSubnet { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(Self { - identity: IdentityMetadata::try_from(value)?, - vpc_id: sql_row_value(value, "vpc_id")?, - ipv4_block: sql_row_value(value, "ipv4_block")?, - ipv6_block: sql_row_value(value, "ipv6_block")?, - }) +impl VpcSubnet { + pub fn identity(&self) -> IdentityMetadata { + IdentityMetadata { + id: self.id, + name: self.name.clone(), + description: self.description.clone(), + time_created: self.time_created, + time_modified: self.time_modified, + time_deleted: self.time_deleted, + } } } -#[derive(Clone, Debug)] +#[derive(Queryable, Identifiable, Insertable, Clone, Debug)] +#[table_name = "networkinterface"] pub struct NetworkInterface { - pub identity: IdentityMetadata, + pub id: Uuid, + pub name: external::Name, + pub description: String, + pub time_created: DateTime, + pub time_modified: DateTime, + pub time_deleted: Option>, + pub vpc_id: Uuid, pub subnet_id: Uuid, pub mac: external::MacAddr, - pub ip: IpAddr, + pub ip: ipnetwork::IpNetwork, } -impl TryFrom<&tokio_postgres::Row> for NetworkInterface { - type Error = Error; - - fn try_from(value: &tokio_postgres::Row) -> Result { - Ok(Self { - identity: IdentityMetadata::try_from(value)?, - vpc_id: sql_row_value(value, "vpc_id")?, - subnet_id: sql_row_value(value, "subnet_id")?, - mac: sql_row_value(value, "mac")?, - ip: sql_row_value(value, "ip")?, - }) +impl NetworkInterface { + pub fn identity(&self) -> IdentityMetadata { + IdentityMetadata { + id: self.id, + name: self.name.clone(), + description: self.description.clone(), + time_created: self.time_created, + time_modified: self.time_modified, + time_deleted: self.time_deleted, + } } } diff --git a/omicron-nexus/src/db/operations.rs b/omicron-nexus/src/db/operations.rs deleted file mode 100644 index 14cc7a8ceb..0000000000 --- a/omicron-nexus/src/db/operations.rs +++ /dev/null @@ -1,125 +0,0 @@ -/*! 
- * Low-level facilities that work directly with tokio_postgres to run queries - * and extract values - */ - -use omicron_common::api::external::Error; -use omicron_common::db::sql_error_generic; -use omicron_common::db::DbError; -use std::convert::TryFrom; -use tokio_postgres::types::ToSql; - -/** - * Wrapper around [`tokio_postgres::Client::query`] that produces errors - * that include the SQL. - */ -/* - * TODO-debugging It would be really, really valuable to include the - * query parameters here as well. However, the only thing we can do with - * ToSql is to serialize it to a PostgreSQL type (as a string of bytes). - * We could require that these impl some trait that also provides Debug, and - * then we could use that. Or we could find a way to parse the resulting - * PostgreSQL value and print _that_ out as a String somehow? - */ -pub async fn sql_query( - client: &tokio_postgres::Client, - sql: &str, - params: &[&(dyn ToSql + Sync)], -) -> Result, DbError> { - client - .query(sql, params) - .await - .map_err(|e| DbError::SqlError { sql: sql.to_owned(), source: e }) -} - -/** - * Like [`sql_query()`], but produces an error unless exactly one row is - * returned. - */ -pub async fn sql_query_always_one( - client: &tokio_postgres::Client, - sql: &str, - params: &[&(dyn ToSql + Sync)], -) -> Result { - sql_query(client, sql, params).await.and_then(|mut rows| match rows.len() { - 1 => Ok(rows.pop().unwrap()), - nrows_found => Err(DbError::BadRowCount { - sql: sql.to_owned(), - nrows_found: u64::try_from(nrows_found).unwrap(), - }), - }) -} - -/** - * Like [`sql_query()`], but produces an error based on the row count: - * - * * the result of `mkzerror()` if there are no rows returned. This is - * expected to be a suitable [`Error::ObjectNotFound`] error. - * * a generic InternalError if more than one row is returned. This is - * expected to be impossible for this query. Otherwise, the caller should use - * [`sql_query()`]. - */ -/* - * TODO-debugging can we include the SQL in the Error - */ -pub async fn sql_query_maybe_one( - client: &tokio_postgres::Client, - sql: &str, - params: &[&(dyn ToSql + Sync)], - mkzerror: impl Fn() -> Error, -) -> Result { - sql_query(client, sql, params).await.map_err(sql_error_generic).and_then( - |mut rows| match rows.len() { - 1 => Ok(rows.pop().unwrap()), - 0 => Err(mkzerror()), - nrows_found => Err(sql_error_generic(DbError::BadRowCount { - sql: sql.to_owned(), - nrows_found: u64::try_from(nrows_found).unwrap(), - })), - }, - ) -} - -/** - * Wrapper around [`tokio_postgres::Client::execute`] that produces errors - * that include the SQL. - */ -/* TODO-debugging See sql_query(). */ -pub async fn sql_execute( - client: &tokio_postgres::Client, - sql: &str, - params: &[&(dyn ToSql + Sync)], -) -> Result { - client - .execute(sql, params) - .await - .map_err(|e| DbError::SqlError { sql: sql.to_owned(), source: e }) -} - -/** - * Like [`sql_execute()`], but produces an error based on the row count: - * - * * the result of `mkzerror()` if there are no rows returned. This is - * expected to be a suitable [`Error::ObjectNotFound`] error. - * * a generic InternalError if more than one row is returned. This is - * expected to be impossible for this query. Otherwise, the caller should use - * [`sql_query()`]. 
- */ -/* TODO-debugging can we include the SQL in the Error */ -pub async fn sql_execute_maybe_one( - client: &tokio_postgres::Client, - sql: &str, - params: &[&(dyn ToSql + Sync)], - mkzerror: impl Fn() -> Error, -) -> Result<(), Error> { - sql_execute(client, sql, params).await.map_err(sql_error_generic).and_then( - |nrows| match nrows { - 1 => Ok(()), - 0 => Err(mkzerror()), - nrows_found => Err(sql_error_generic(DbError::BadRowCount { - sql: sql.to_owned(), - nrows_found, - })), - }, - ) -} diff --git a/omicron-nexus/src/db/pagination.rs b/omicron-nexus/src/db/pagination.rs new file mode 100644 index 0000000000..3ec9400217 --- /dev/null +++ b/omicron-nexus/src/db/pagination.rs @@ -0,0 +1,59 @@ +//! Interface for paginating database queries. + +use diesel::dsl::{Asc, Desc, Gt, Lt}; +use diesel::expression::AsExpression; +use diesel::pg::Pg; +use diesel::query_builder::AsQuery; +use diesel::query_builder::BoxedSelectStatement; +use diesel::query_dsl::methods as query_methods; +use diesel::sql_types::SqlType; +use diesel::AppearsOnTable; +use diesel::Column; +use diesel::{ExpressionMethods, QueryDsl}; +use omicron_common::api::external::DataPageParams; + +// Shorthand alias for "the SQL type of the whole table". +type TableSqlType = ::SqlType; + +// Shorthand alias for the type made from "table.into_boxed()". +type BoxedQuery = BoxedSelectStatement<'static, TableSqlType, T, Pg>; + +/// Uses `pagparams` to list a subset of rows in `table`, ordered by `column`. +pub fn paginated( + table: T, + column: C, + pagparams: &DataPageParams<'_, M>, +) -> BoxedQuery +where + // T is a table which can create a BoxedQuery. + T: diesel::Table, + T: query_methods::BoxedDsl<'static, Pg, Output = BoxedQuery>, + // C is a column which appears in T. + C: 'static + Column + Copy + ExpressionMethods + AppearsOnTable, + // Required to compare the column with the marker type. + C::SqlType: SqlType, + M: Clone + AsExpression, + // Defines the methods which can be called on "query", and tells + // the compiler we're gonna output a BoxedQuery each time. + BoxedQuery: query_methods::OrderDsl, Output = BoxedQuery>, + BoxedQuery: query_methods::OrderDsl, Output = BoxedQuery>, + BoxedQuery: query_methods::FilterDsl, Output = BoxedQuery>, + BoxedQuery: query_methods::FilterDsl, Output = BoxedQuery>, +{ + let mut query = table.into_boxed().limit(pagparams.limit.get().into()); + let marker = pagparams.marker.map(|m| m.clone()); + match pagparams.direction { + dropshot::PaginationOrder::Ascending => { + if let Some(marker) = marker { + query = query.filter(column.gt(marker)); + } + query.order(column.asc()) + } + dropshot::PaginationOrder::Descending => { + if let Some(marker) = marker { + query = query.filter(column.lt(marker)); + } + query.order(column.desc()) + } + } +} diff --git a/omicron-nexus/src/db/pool.rs b/omicron-nexus/src/db/pool.rs index f66ac5fb18..3739b89cbe 100644 --- a/omicron-nexus/src/db/pool.rs +++ b/omicron-nexus/src/db/pool.rs @@ -25,56 +25,28 @@ */ use super::Config as DbConfig; -use bb8_postgres::PostgresConnectionManager; -use omicron_common::api::external::Error; -use std::ops::Deref; +use async_bb8_diesel::DieselConnectionManager; +use diesel::PgConnection; -#[derive(Debug)] +/// Wrapper around a database connection pool. +/// +/// Expected to be used as the primary interface to the database. 
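The `paginated()` helper above and this pool wrapper are used together by every datastore listing method. A sketch (hypothetical helper, mirroring `project_list_vpcs` earlier in the patch) of a name-ordered, soft-delete-aware listing:

    async fn projects_list_by_name(
        pool: &db::Pool,
        pagparams: &omicron_common::api::external::DataPageParams<
            '_,
            omicron_common::api::external::Name,
        >,
    ) -> Result<Vec<db::model::Project>, omicron_common::api::external::Error> {
        use async_bb8_diesel::AsyncRunQueryDsl;
        use db::diesel_schema::project::dsl;
        use db::pagination::paginated;
        use diesel::{ExpressionMethods, QueryDsl};
        use omicron_common::api::external::{Error, LookupType, ResourceType};

        // paginated() applies the LIMIT, the ORDER BY on name, and a
        // name > / < marker filter depending on the requested direction.
        paginated(dsl::project, dsl::name, pagparams)
            .filter(dsl::time_deleted.is_null())
            .load_async(pool.pool())
            .await
            .map_err(|e| {
                Error::from_diesel(
                    e,
                    ResourceType::Project,
                    LookupType::Other("listing projects".to_string()),
                )
            })
    }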
pub struct Pool { - pool: bb8::Pool>, -} - -pub struct Conn<'a> { - conn: bb8::PooledConnection< - 'a, - PostgresConnectionManager, - >, -} - -impl<'a> Deref for Conn<'a> { - type Target = tokio_postgres::Client; - - fn deref(&self) -> &Self::Target { - self.conn.deref() - } + pool: bb8::Pool>, } impl Pool { pub fn new(db_config: &DbConfig) -> Self { - let mgr = bb8_postgres::PostgresConnectionManager::new( - (*db_config.url).clone(), - tokio_postgres::NoTls, - ); - let pool = bb8::Builder::new().build_unchecked(mgr); + let manager = + DieselConnectionManager::::new(&db_config.url.url()); + let pool = bb8::Builder::new().build_unchecked(manager); Pool { pool } } - pub async fn acquire(&self) -> Result, Error> { - /* - * TODO-design It would be better to provide more detailed error - * information here so that we could monitor various kinds of failures. - * It would also be nice if callers could provide parameters for, e.g., - * how long to wait for a connection here. Really, it would be nice if - * they could create their own handles to the connection pool with - * parameters like this. It could also have its own logger. - */ - self.pool.get().await.map(|conn| Conn { conn }).map_err(|e| { - Error::ServiceUnavailable { - message: format!( - "failed to acquire database connection: {}", - e.to_string() - ), - } - }) + /// Returns a reference to the underlying pool. + pub fn pool( + &self, + ) -> &bb8::Pool> { + &self.pool } } diff --git a/omicron-nexus/src/db/saga_recovery.rs b/omicron-nexus/src/db/saga_recovery.rs index edc24852ac..5c505ccec8 100644 --- a/omicron-nexus/src/db/saga_recovery.rs +++ b/omicron-nexus/src/db/saga_recovery.rs @@ -3,14 +3,16 @@ */ use crate::db; -use crate::db::schema; -use crate::db::sql::Table; -use crate::db::sql_operations::sql_paginate; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::{ExpressionMethods, QueryDsl}; use omicron_common::api::external::Error; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; use omicron_common::backoff::internal_service_policy; use omicron_common::backoff::retry_notify; use omicron_common::backoff::BackoffError; use std::collections::BTreeMap; +use std::convert::TryFrom; use std::fmt; use std::sync::Arc; use steno::SagaTemplateGeneric; @@ -96,7 +98,7 @@ where * before we go back to one that's failed. */ /* TODO-debug want visibility into "abandoned" sagas */ - let saga_id = saga.id; + let saga_id: steno::SagaId = saga.id.into(); if let Err(error) = recover_saga(&log, &uctx, &pool, &sec_client, templates, saga) .await @@ -128,20 +130,29 @@ async fn list_unfinished_sagas( * step, not recovering all the individual sagas. */ trace!(&log, "listing sagas"); - let client = pool.acquire().await?; + use db::diesel_schema::saga::dsl; - let mut sql = db::sql::SqlString::new(); - let saga_state_done = steno::SagaCachedState::Done.to_string(); - let p1 = sql.next_param(&saga_state_done); - let p2 = sql.next_param(sec_id); - sql.push_str(&format!("saga_state != {} AND current_sec = {}", p1, p2)); - - sql_paginate::< - schema::LookupGenericByUniqueId, - schema::Saga, - ::Model, - >(&client, (), schema::Saga::ALL_COLUMNS, sql) - .await + // TODO(diesel-conversion): Do we want this to be paginated? + // In the pre-diesel era, this method read all relevant rows from + // the database - as we do here - but did so in chunks of 100 at a time, + // looping until all rows were loaded. 
+ // + // It's not clear to me why this happened, as it wasn't a memory-saving + // measure - ultimately all rows were stored in a Vec as the return value. + // + // TODO: See load-saga-log below; a similar call was made there. + dsl::saga + .filter(dsl::saga_state.ne(steno::SagaCachedState::Done.to_string())) + .filter(dsl::current_sec.eq(*sec_id)) + .load_async(pool.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::SagaDbg, + LookupType::ById(sec_id.0), + ) + }) } /** @@ -160,30 +171,30 @@ async fn recover_saga( where T: Send + Sync + fmt::Debug + 'static, { - let saga_id = saga.id; + let saga_id: steno::SagaId = saga.id.into(); let template_name = saga.template_name.as_str(); trace!(log, "recovering saga: start"; - "saga_id" => saga.id.to_string(), + "saga_id" => saga_id.to_string(), "template_name" => template_name, ); let template = templates.get(template_name).ok_or_else(|| { Error::internal_error(&format!( "saga {} uses unknown template {:?}", - saga.id, template_name, + saga_id, template_name, )) })?; trace!(log, "recovering saga: found template"; - "saga_id" => ?saga.id, + "saga_id" => ?saga_id, "template_name" => template_name ); let log_events = load_saga_log(pool, &saga).await?; trace!(log, "recovering saga: loaded log"; - "saga_id" => ?saga.id, + "saga_id" => ?saga_id, "template_name" => template_name ); let _ = sec_client .saga_resume( - saga.id, + saga_id, Arc::clone(uctx), Arc::clone(template), saga.template_name, @@ -204,7 +215,7 @@ where sec_client.saga_start(saga_id).await.map_err(|error| { Error::internal_error(&format!("failed to start saga: {:#}", error)) })?; - info!(log, "recovering saga: done"; "saga_id" => ?saga.id); + info!(log, "recovering saga: done"; "saga_id" => ?saga_id); Ok(()) } @@ -215,24 +226,24 @@ pub async fn load_saga_log( pool: &db::Pool, saga: &db::saga_types::Saga, ) -> Result, Error> { - let client = pool.acquire().await?; - let mut extra_sql = db::sql::SqlString::new(); - extra_sql.push_str("TRUE"); + use db::diesel_schema::saganodeevent::dsl; - let log_records = sql_paginate::< - schema::LookupSagaNodeEvent, - schema::SagaNodeEvent, - ::Model, - >( - &client, - (&saga.id.0,), - schema::SagaNodeEvent::ALL_COLUMNS, - extra_sql, - ) - .await?; + // TODO(diesel-conversion): See the note above in list_unfinished_sagas + // regarding pagination. + let log_records: Vec = dsl::saganodeevent + .filter(dsl::saga_id.eq(saga.id)) + .load_async::(pool.pool()) + .await + .map_err(|e| { + Error::from_diesel( + e, + ResourceType::SagaDbg, + LookupType::ById(saga.id.0 .0), + ) + })? + .into_iter() + .map(|db_event| steno::SagaNodeEvent::try_from(db_event)) + .collect::>()?; - Ok(log_records - .iter() - .map(steno::SagaNodeEvent::from) - .collect::>()) + Ok(log_records) } diff --git a/omicron-nexus/src/db/saga_types.rs b/omicron-nexus/src/db/saga_types.rs index 0a3be7dd98..930a7e6579 100644 --- a/omicron-nexus/src/db/saga_types.rs +++ b/omicron-nexus/src/db/saga_types.rs @@ -10,14 +10,15 @@ * conversions. 
*/ -use crate::db; +use super::diesel_schema::{saga, saganodeevent}; +use diesel::backend::{Backend, RawValue}; +use diesel::deserialize::{self, FromSql}; +use diesel::serialize::{self, ToSql}; +use diesel::sql_types; use omicron_common::api::external::Error; use omicron_common::api::external::Generation; -use omicron_common::db::sql_row_value; -use omicron_common::impl_sql_wrapping; use std::convert::TryFrom; use std::sync::Arc; -use steno::SagaId; use uuid::Uuid; /** @@ -26,9 +27,35 @@ use uuid::Uuid; * For us, these will generally be Nexus instances, and the SEC id will match * the Nexus id. */ -#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] -pub struct SecId(Uuid); -impl_sql_wrapping!(SecId, Uuid); +#[derive( + AsExpression, FromSqlRow, Clone, Copy, Eq, Ord, PartialEq, PartialOrd, +)] +#[sql_type = "sql_types::Uuid"] +pub struct SecId(pub Uuid); + +impl ToSql for SecId +where + DB: Backend, + Uuid: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output, + ) -> serialize::Result { + (&self.0 as &Uuid).to_sql(out) + } +} + +impl FromSql for SecId +where + DB: Backend, + Uuid: FromSql, +{ + fn from_sql(bytes: RawValue) -> deserialize::Result { + let id = Uuid::from_sql(bytes)?; + Ok(SecId(id)) + } +} // TODO-cleanup figure out how to use custom_derive here? NewtypeDebug! { () pub struct SecId(Uuid); } @@ -41,83 +68,176 @@ impl From<&SecId> for Uuid { } } -/** - * Represents a row in the "Saga" table - */ +/// Newtype wrapper around [`steno::SagaId`] which implements +/// Diesel traits. +/// +/// This exists because Omicron cannot implement foreign traits +/// for foreign types. +#[derive( + AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq, PartialOrd, +)] +#[sql_type = "sql_types::Uuid"] +pub struct SagaId(pub steno::SagaId); + +NewtypeFrom! { () pub struct SagaId(steno::SagaId); } + +impl ToSql for SagaId +where + DB: Backend, + Uuid: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output<'_, W, DB>, + ) -> serialize::Result { + (&self.0.into() as &Uuid).to_sql(out) + } +} + +impl FromSql for SagaId +where + DB: Backend, + Uuid: FromSql, +{ + fn from_sql(bytes: RawValue<'_, DB>) -> deserialize::Result { + let id = Uuid::from_sql(bytes)?; + Ok(SagaId(steno::SagaId::from(id))) + } +} + +/// Newtype wrapper around [`steno::SagaNodeId`] which implements +/// Diesel traits. +/// +/// This exists because Omicron cannot implement foreign traits +/// for foreign types. +#[derive( + AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq, PartialOrd, +)] +#[sql_type = "sql_types::BigInt"] +pub struct SagaNodeId(pub steno::SagaNodeId); + +NewtypeFrom! { () pub struct SagaNodeId(steno::SagaNodeId); } + +impl ToSql for SagaNodeId +where + DB: Backend, + i64: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output<'_, W, DB>, + ) -> serialize::Result { + // Diesel newtype -> steno type -> u32 -> i64 -> SQL + (u32::from(self.0) as i64).to_sql(out) + } +} + +impl FromSql for SagaNodeId +where + DB: Backend, + i64: FromSql, +{ + fn from_sql(bytes: RawValue<'_, DB>) -> deserialize::Result { + let id = u32::try_from(i64::from_sql(bytes)?)?; + Ok(SagaNodeId(id.into())) + } +} + +/// Newtype wrapper around [`steno::SagaCachedState`] which implements +/// Diesel traits. +/// +/// This exists because Omicron cannot implement foreign traits +/// for foreign types. +#[derive(AsExpression, FromSqlRow, Clone, Copy, Debug, PartialEq)] +#[sql_type = "sql_types::Text"] +pub struct SagaCachedState(pub steno::SagaCachedState); + +NewtypeFrom! 
{ () pub struct SagaCachedState(steno::SagaCachedState); } + +impl ToSql for SagaCachedState +where + DB: Backend, + String: ToSql, +{ + fn to_sql( + &self, + out: &mut serialize::Output<'_, W, DB>, + ) -> serialize::Result { + (&self.0.to_string() as &String).to_sql(out) + } +} + +impl FromSql for SagaCachedState +where + DB: Backend, + String: FromSql, +{ + fn from_sql(bytes: RawValue<'_, DB>) -> deserialize::Result { + let s = String::from_sql(bytes)?; + let state = steno::SagaCachedState::try_from(s.as_str())?; + Ok(Self(state)) + } +} + +/// Represents a row in the "Saga" table +#[derive(Queryable, Insertable, Clone, Debug)] +#[table_name = "saga"] pub struct Saga { pub id: SagaId, pub creator: SecId, pub template_name: String, pub time_created: chrono::DateTime, pub saga_params: serde_json::Value, - pub saga_state: steno::SagaCachedState, + pub saga_state: SagaCachedState, pub current_sec: Option, pub adopt_generation: Generation, pub adopt_time: chrono::DateTime, } -impl TryFrom<&tokio_postgres::Row> for Saga { - type Error = Error; - - fn try_from(row: &tokio_postgres::Row) -> Result { - let saga_state_str: String = sql_row_value(row, "saga_state")?; - let saga_state = steno::SagaCachedState::try_from( - saga_state_str.as_str(), - ) - .map_err(|e| { - Error::internal_error(&format!( - "failed to parse saga state {:?}: {:#}", - saga_state_str, e - )) - })?; - Ok(Saga { - id: SagaId(sql_row_value::<_, Uuid>(row, "id")?), - creator: sql_row_value(row, "creator")?, - template_name: sql_row_value(row, "template_name")?, - time_created: sql_row_value(row, "time_created")?, - saga_params: sql_row_value(row, "saga_params")?, - saga_state: saga_state, - current_sec: sql_row_value(row, "current_sec")?, - adopt_generation: sql_row_value(row, "adopt_generation")?, - adopt_time: sql_row_value(row, "adopt_time")?, - }) - } -} - -impl db::sql::SqlSerialize for Saga { - fn sql_serialize(&self, output: &mut db::SqlValueSet) { - output.set("id", &self.id.0); - output.set("creator", &self.creator); - output.set("template_name", &self.template_name); - output.set("time_created", &self.time_created); - output.set("saga_params", &self.saga_params); - output.set("saga_state", &self.saga_state.to_string()); - output.set("current_sec", &self.current_sec); - output.set("adopt_generation", &self.adopt_generation); - output.set("adopt_time", &self.adopt_time); - } -} - -/** - * Represents a row in the "SagaNodeEvent" table - */ -#[derive(Clone, Debug)] +/// Represents a row in the "SagaNodeEvent" table +#[derive(Queryable, Insertable, Clone, Debug)] +#[table_name = "saganodeevent"] pub struct SagaNodeEvent { pub saga_id: SagaId, - pub node_id: steno::SagaNodeId, - pub event_type: steno::SagaNodeEventType, + pub node_id: SagaNodeId, + pub event_type: String, + pub data: Option, pub event_time: chrono::DateTime, pub creator: SecId, } -impl TryFrom<&tokio_postgres::Row> for SagaNodeEvent { - type Error = Error; +impl SagaNodeEvent { + pub fn new(event: steno::SagaNodeEvent, creator: SecId) -> Self { + let data = match event.event_type { + steno::SagaNodeEventType::Succeeded(ref data) => { + Some((**data).clone()) + } + steno::SagaNodeEventType::Failed(ref err) => { + // It's hard to imagine how this serialize step could fail. If + // we're worried that it could, we could instead store the + // serialized value directly in the `SagaNodeEvent`. We'd be + // forced to construct it in a context where failure could be + // handled. 
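Taken together, `new()` here and the `TryFrom` impl just below give the intended round trip between a steno event and its row representation (the row-only `event_time`/`creator` metadata aside). A tiny sketch, assuming a `SecId` for the local SEC:

    fn roundtrip_event(
        event: steno::SagaNodeEvent,
        creator: SecId,
    ) -> Result<steno::SagaNodeEvent, omicron_common::api::external::Error> {
        use std::convert::TryFrom;

        // Flatten the event into the row shape (label string + optional JSON)...
        let row = SagaNodeEvent::new(event, creator);
        // ...and rebuild the steno event from the row, as saga recovery does.
        steno::SagaNodeEvent::try_from(row)
    }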
+ Some(serde_json::to_value(err).unwrap()) + } + _ => None, + }; - fn try_from(row: &tokio_postgres::Row) -> Result { - let event_data: Option = sql_row_value(row, "data")?; - let event_name: String = sql_row_value(row, "event_type")?; + Self { + saga_id: event.saga_id.into(), + node_id: event.node_id.into(), + event_type: event.event_type.label().to_string(), + data, + event_time: chrono::Utc::now(), + creator, + } + } +} - let event_type = match (event_name.as_str(), event_data) { +impl TryFrom for steno::SagaNodeEvent { + type Error = Error; + fn try_from(ours: SagaNodeEvent) -> Result { + let event_type = match (ours.event_type.as_str(), ours.data) { ("started", None) => steno::SagaNodeEventType::Started, ("succeeded", Some(d)) => { steno::SagaNodeEventType::Succeeded(Arc::new(d)) @@ -143,58 +263,10 @@ impl TryFrom<&tokio_postgres::Row> for SagaNodeEvent { } }; - let node_id_i64: i64 = sql_row_value(row, "node_id")?; - let node_id_u32 = u32::try_from(node_id_i64) - .map_err(|_| Error::internal_error("node id out of range"))?; - let node_id = steno::SagaNodeId::from(node_id_u32); - - Ok(SagaNodeEvent { - saga_id: SagaId(sql_row_value::<_, Uuid>(row, "saga_id")?), - node_id, + Ok(steno::SagaNodeEvent { + saga_id: ours.saga_id.into(), + node_id: ours.node_id.into(), event_type, - event_time: sql_row_value(row, "event_time")?, - creator: sql_row_value(row, "creator")?, }) } } - -impl db::sql::SqlSerialize for SagaNodeEvent { - fn sql_serialize(&self, output: &mut db::SqlValueSet) { - let (event_name, event_data) = match self.event_type { - steno::SagaNodeEventType::Started => ("started", None), - steno::SagaNodeEventType::Succeeded(ref d) => { - ("succeeded", Some((**d).clone())) - } - steno::SagaNodeEventType::Failed(ref d) => { - /* - * It's hard to imagine how this serialize step could fail. If - * we're worried that it could, we could instead store the - * serialized value directly in the `SagaNodeEvent`. We'd be - * forced to construct it in a context where failure could be - * handled. - */ - let json = serde_json::to_value(d).unwrap(); - ("failed", Some(json)) - } - steno::SagaNodeEventType::UndoStarted => ("undo_started", None), - steno::SagaNodeEventType::UndoFinished => ("undo_finished", None), - }; - - output.set("saga_id", &Uuid::from(self.saga_id)); - output.set("node_id", &i64::from(u32::from(self.node_id))); - output.set("event_type", &event_name); - output.set("data", &event_data); - output.set("event_time", &self.event_time); - output.set("creator", &self.creator); - } -} - -impl From<&SagaNodeEvent> for steno::SagaNodeEvent { - fn from(ours: &SagaNodeEvent) -> Self { - steno::SagaNodeEvent { - saga_id: ours.saga_id, - node_id: ours.node_id, - event_type: ours.event_type.clone(), - } - } -} diff --git a/omicron-nexus/src/db/schema.rs b/omicron-nexus/src/db/schema.rs deleted file mode 100644 index e251b7719b..0000000000 --- a/omicron-nexus/src/db/schema.rs +++ /dev/null @@ -1,457 +0,0 @@ -/*! - * Rust types that describe the control plane database schema - * - * This includes the control-plane-specific impls for the generic facilities in - * ./sql.rs. - */ - -use omicron_common::api::external::Error; -use omicron_common::api::external::Name; -use omicron_common::api::external::ResourceType; -use uuid::Uuid; - -use super::sql::LookupKey; -use super::sql::ResourceTable; -use super::sql::Table; -use crate::db; - -// TODO: Now that db/model.rs is a thing, we could actually impl Table -// for all those real structs? 
Might help to reduce things a bit; the -// schema does use them as Models anyway. - -/** Describes the "Project" table */ -pub struct Project; -impl Table for Project { - type Model = db::model::Project; - const TABLE_NAME: &'static str = "Project"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "name", - "description", - "time_created", - "time_modified", - "time_deleted", - ]; -} - -impl ResourceTable for Project { - const RESOURCE_TYPE: ResourceType = ResourceType::Project; -} - -/** Describes the "Instance" table */ -pub struct Instance; -impl Table for Instance { - type Model = db::model::Instance; - const TABLE_NAME: &'static str = "Instance"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "name", - "description", - "time_created", - "time_modified", - "time_deleted", - "project_id", - "instance_state", - "time_state_updated", - "state_generation", - "active_server_id", - "ncpus", - "memory", - "hostname", - ]; -} - -impl ResourceTable for Instance { - const RESOURCE_TYPE: ResourceType = ResourceType::Instance; -} - -/** Describes the "Disk" table */ -pub struct Disk; -impl Table for Disk { - type Model = db::model::Disk; - const TABLE_NAME: &'static str = "Disk"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "name", - "description", - "time_created", - "time_modified", - "time_deleted", - "project_id", - "disk_state", - "time_state_updated", - "state_generation", - "attach_instance_id", - "size_bytes", - "origin_snapshot", - ]; -} - -impl ResourceTable for Disk { - const RESOURCE_TYPE: ResourceType = ResourceType::Disk; -} - -/** Describes the "Saga" table */ -pub struct Saga; -impl Table for Saga { - type Model = db::saga_types::Saga; - const TABLE_NAME: &'static str = "Saga"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "creator", - "template_name", - "time_created", - "saga_params", - "saga_state", - "current_sec", - "adopt_generation", - "adopt_time", - ]; - const LIVE_CONDITIONS: &'static str = "TRUE"; -} - -/** Describes the "SagaNodeEvent" table */ -pub struct SagaNodeEvent; -impl Table for SagaNodeEvent { - type Model = db::saga_types::SagaNodeEvent; - const TABLE_NAME: &'static str = "SagaNodeEvent"; - const ALL_COLUMNS: &'static [&'static str] = - &["saga_id", "node_id", "event_type", "data", "event_time", "creator"]; - const LIVE_CONDITIONS: &'static str = "TRUE"; -} - -/** Describes the "Oximeter" table */ -pub struct Oximeter; -impl Table for Oximeter { - type Model = db::model::OximeterInfo; - const TABLE_NAME: &'static str = "Oximeter"; - const ALL_COLUMNS: &'static [&'static str] = - &["id", "time_created", "time_modified", "ip", "port"]; -} - -/** Describes the "MetricProducer" table */ -pub struct MetricProducer; -impl Table for MetricProducer { - type Model = db::model::ProducerEndpoint; - const TABLE_NAME: &'static str = "MetricProducer"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "time_created", - "time_modified", - "ip", - "port", - "interval", - "route", - ]; -} - -/** Describes the "OximeterAssignment" table */ -pub struct OximeterAssignment; -impl Table for OximeterAssignment { - type Model = db::model::OximeterAssignment; - const TABLE_NAME: &'static str = "OximeterAssignment"; - const ALL_COLUMNS: &'static [&'static str] = - &["oximeter_id", "producer_id", "time_created"]; -} - -/** Describes the "Vpc" table */ -pub struct Vpc; -impl Table for Vpc { - type Model = db::model::Vpc; - const TABLE_NAME: &'static str = "Vpc"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - 
"name", - "description", - "time_created", - "time_modified", - "time_deleted", - "project_id", - "dns_name", - ]; -} - -impl ResourceTable for Vpc { - const RESOURCE_TYPE: ResourceType = ResourceType::Vpc; -} - -/** Describes the "VpcSubnet" table */ -pub struct VpcSubnet; -impl Table for VpcSubnet { - type Model = db::model::VpcSubnet; - const TABLE_NAME: &'static str = "VpcSubnet"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "name", - "description", - "time_created", - "time_modified", - "time_deleted", - "vpc_id", - "ipv4_block", - "ipv6_block", - ]; -} - -/** Describes the "NetworkInterface" table */ -pub struct NetworkInterface; -impl Table for NetworkInterface { - type Model = db::model::NetworkInterface; - const TABLE_NAME: &'static str = "NetworkInterface"; - const ALL_COLUMNS: &'static [&'static str] = &[ - "id", - "name", - "description", - "time_created", - "time_modified", - "time_deleted", - "vpc_id", - "subnet_id", - "mac", - "ip", - ]; -} - -#[cfg(test)] -mod test { - use super::Disk; - use super::Instance; - use super::Project; - use super::Saga; - use super::SagaNodeEvent; - use super::Table; - use super::{MetricProducer, Oximeter, OximeterAssignment}; - use super::{NetworkInterface, Vpc, VpcSubnet}; - use omicron_common::dev; - use std::collections::BTreeSet; - use tokio_postgres::types::ToSql; - - /* - * Check the Rust descriptions of the database schema against what's in a - * newly-populated database. This is simplistic for lots of reasons: it - * doesn't account for multiple versions of the schema, schema migrations, - * types, and any number of other complexities. It still seems like a - * useful sanity check. - */ - #[tokio::test] - async fn test_schemas() { - let logctx = dev::test_setup_log("test_schemas").await; - let mut database = dev::test_setup_database(&logctx.log).await; - let client = database - .connect() - .await - .expect("failed to connect to test database"); - - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - check_table_schema::(&client).await; - - database.cleanup().await.expect("failed to clean up database"); - logctx.cleanup_successful(); - } - - async fn check_table_schema(c: &tokio_postgres::Client) { - let sql = "SELECT column_name FROM information_schema.columns \ - WHERE table_catalog = $1 AND lower(table_name) = lower($2)"; - let sql_params: Vec<&(dyn ToSql + Sync)> = - vec![&"omicron", &T::TABLE_NAME]; - let rows = c - .query(sql, &sql_params) - .await - .expect("failed to query information_schema"); - - if rows.is_empty() { - panic!( - "querying information_schema: found no rows \ - (sql = {:?}, $1 = {:?}, $2 = {:?})", - sql, sql_params[0], sql_params[1], - ); - } - - let set_expected = T::ALL_COLUMNS - .iter() - .cloned() - .map(str::to_owned) - .collect::>(); - let expected = - set_expected.iter().cloned().collect::>().join(", "); - let set_found = rows - .iter() - .map(|r| { - r.try_get::<'_, _, String>("column_name") - .expect("missing \"column_name\"") - }) - .collect::>(); - let found = - set_found.iter().cloned().collect::>().join(", "); - let list_missing = set_expected - .difference(&set_found) - .cloned() - .collect::>(); - let list_extra = set_found - 
.difference(&set_expected) - .cloned() - .collect::>(); - - eprintln!("TABLE: {}", T::TABLE_NAME); - eprintln!("found in database: {}", expected); - eprintln!("found in Rust definition: {}", found); - eprintln!("missing from Rust: {}", list_extra.join(", ")); - eprintln!("missing from database: {}", list_missing.join(", ")); - - if !list_missing.is_empty() || !list_extra.is_empty() { - panic!( - "mismatch between columns in database schema and those defined \ - in Rust code for table {:?}", - T::TABLE_NAME - ); - } else { - eprintln!("TABLE {} Rust and database fields match", T::TABLE_NAME); - } - } -} - -/** - * Implementation of [`LookupKey`] for looking up objects by their universally - * unique id - */ -pub struct LookupByUniqueId; -impl<'a, R: ResourceTable> LookupKey<'a, R> for LookupByUniqueId { - type ScopeKey = (); - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str] = &[]; - type ItemKey = Uuid; - const ITEM_KEY_COLUMN_NAME: &'static str = "id"; - - fn where_select_error( - _scope_key: Self::ScopeKey, - item_key: &Self::ItemKey, - ) -> Error { - Error::not_found_by_id(R::RESOURCE_TYPE, item_key) - } -} - -/** - * Like [`LookupByUniqueId`], but for objects that are not API resources (so - * that "not found" error is different). - */ -pub struct LookupGenericByUniqueId; -impl<'a, T: Table> LookupKey<'a, T> for LookupGenericByUniqueId { - type ScopeKey = (); - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str] = &[]; - type ItemKey = Uuid; - const ITEM_KEY_COLUMN_NAME: &'static str = "id"; - - fn where_select_error( - _scope_key: Self::ScopeKey, - item_key: &Self::ItemKey, - ) -> Error { - Error::internal_error(&format!( - "table {:?}: expected row with id {:?}, but found none", - T::TABLE_NAME, - item_key, - )) - } -} - -/** - * Implementation of [`LookupKey`] for looking up objects by just their name - * - * This generally assumes that the name is unique within an instance of the - * control plane. (You could also use this to look up all objects of a certain - * type having a given name, but it's not clear that would be useful.) - */ -pub struct LookupByUniqueName; -impl<'a, R: ResourceTable> LookupKey<'a, R> for LookupByUniqueName { - type ScopeKey = (); - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str] = &[]; - type ItemKey = Name; - const ITEM_KEY_COLUMN_NAME: &'static str = "name"; - - fn where_select_error( - _scope_key: Self::ScopeKey, - item_key: &Self::ItemKey, - ) -> Error { - Error::not_found_by_name(R::RESOURCE_TYPE, item_key) - } -} - -/** - * Implementation of [`LookupKey`] for looking up objects within a project by - * the project_id and the object's name - */ -pub struct LookupByUniqueNameInProject; -impl<'a, R: ResourceTable> LookupKey<'a, R> for LookupByUniqueNameInProject { - type ScopeKey = (&'a Uuid,); - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str] = &["project_id"]; - type ItemKey = Name; - const ITEM_KEY_COLUMN_NAME: &'static str = "name"; - - fn where_select_error( - _scope_key: Self::ScopeKey, - item_key: &Self::ItemKey, - ) -> Error { - Error::not_found_by_name(R::RESOURCE_TYPE, item_key) - } -} - -/** - * Implementation of [`LookupKey`] for looking up objects by name within the - * scope of an instance (SQL column "attach_instance_id"). This is really just - * intended for finding attached disks. 
- */ -pub struct LookupByAttachedInstance; -impl<'a, R: ResourceTable> LookupKey<'a, R> for LookupByAttachedInstance { - /* - * TODO-design What if we want an additional filter here (like disk_state in - * ('attaching', 'attached', 'detaching'))? This would almost work using - * the fixed columns except that we cannot change the operator or supply - * multiple values. - */ - type ScopeKey = (&'a Uuid,); - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str] = - &["attach_instance_id"]; - type ItemKey = Name; - const ITEM_KEY_COLUMN_NAME: &'static str = "name"; - - fn where_select_error( - _scope_key: Self::ScopeKey, - _item_key: &Self::ItemKey, - ) -> Error { - /* - * This is not a supported API operation, so we do not have an - * appropriate NotFound error. - */ - Error::internal_error("attempted lookup attached instance") - } -} - -/** - * Implementation of [`LookupKey`] specifically for listing pages from the - * "SagaNodeEvent" table. We're always filtering on a specific `saga_id` and - * paginating by `node_id`. - */ -pub struct LookupSagaNodeEvent; -impl<'a> LookupKey<'a, SagaNodeEvent> for LookupSagaNodeEvent { - type ScopeKey = (&'a Uuid,); - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str] = &["saga_id"]; - type ItemKey = i64; - const ITEM_KEY_COLUMN_NAME: &'static str = "node_id"; - - fn where_select_error( - _scope_key: Self::ScopeKey, - _item_key: &Self::ItemKey, - ) -> Error { - /* There's no reason to ever use this function. */ - Error::internal_error( - "unexpected lookup for saga node unexpectedly found no rows", - ) - } -} diff --git a/omicron-nexus/src/db/sec_store.rs b/omicron-nexus/src/db/sec_store.rs index aa829a85d4..6a80e52f62 100644 --- a/omicron-nexus/src/db/sec_store.rs +++ b/omicron-nexus/src/db/sec_store.rs @@ -50,12 +50,12 @@ impl steno::SecStore for CockroachDbSecStore { let now = chrono::Utc::now(); let saga_record = db::saga_types::Saga { - id: create_params.id, + id: create_params.id.into(), creator: self.sec_id, template_name: create_params.template_name, time_created: now, saga_params: create_params.saga_params, - saga_state: create_params.state, + saga_state: create_params.state.into(), current_sec: Some(self.sec_id), adopt_generation: Generation::new(), adopt_time: now, @@ -73,13 +73,7 @@ impl steno::SecStore for CockroachDbSecStore { "node_id" => ?event.node_id, "event_type" => ?event.event_type, ); - let our_event = db::saga_types::SagaNodeEvent { - saga_id: event.saga_id, - node_id: event.node_id, - event_type: event.event_type, - creator: self.sec_id, - event_time: chrono::Utc::now(), - }; + let our_event = db::saga_types::SagaNodeEvent::new(event, self.sec_id); /* * TODO-robustness This should be wrapped with a retry loop rather than diff --git a/omicron-nexus/src/db/sql.rs b/omicron-nexus/src/db/sql.rs deleted file mode 100644 index 2db47758e2..0000000000 --- a/omicron-nexus/src/db/sql.rs +++ /dev/null @@ -1,679 +0,0 @@ -/*! - * Facilities for working with and generating SQL strings - * - * This is a bit of a mini ORM. The facilities in this file are intended to be - * agnostic to the control plane. There is a bit of leakage in a few places. 
- */ - -use omicron_common::api::external::DataPageParams; -use omicron_common::api::external::Error; -use omicron_common::api::external::ResourceType; -use std::collections::BTreeSet; -use std::convert::TryFrom; -use tokio_postgres::types::FromSql; -use tokio_postgres::types::ToSql; - -/** - * Interface for constructing SQL query strings and associated parameters for - * handling untrusted input - * - * This interface helps you safely construct SQL strings with untrusted input. - * **But it does not guarantee that the resulting SQL string is safe.** You - * must use `next_param()` for _all_ untrusted input for the resulting SQL - * string to be safe from SQL injection. - * - * ## Example - * - * Here's how you can safely include untrusted input into a SQL string using - * this interface: - * - * ```no_run - * # #[tokio::main] - * # async fn main() { - * # use omicron_nexus::db::sql::SqlString; - * let mut sql = SqlString::new(); - * let param = sql.next_param(&"Robert'); DROP TABLE Students;--"); - * sql.push_str(&format!("SELECT * FROM Students WHERE name = {}", param)); - * assert_eq!(sql.sql_fragment(), "SELECT * FROM Students WHERE name = $1"); - * - * // You can safely use this like so: - * # fn get_client() -> tokio_postgres::Client { unimplemented!(); } - * let client: tokio_postgres::Client = get_client(); - * let rows = client.query(sql.sql_fragment(), sql.sql_params()).await; - * # } - * ``` - */ -/* - * TODO-design Would it be useful (and possible) to have a macro like - * `sql!(sql_str[, param...])` where `sql_str` is `&'static str` and the params - * are all numbered params? - */ -#[derive(Clone)] -pub struct SqlString<'a> { - contents: String, - params: Vec<&'a (dyn ToSql + Sync)>, -} - -impl<'a> SqlString<'a> { - pub fn new() -> SqlString<'a> { - SqlString { contents: String::new(), params: Vec::new() } - } - - /** - * Construct a new SqlString with an empty string, but whose first numbered - * parameter starts after the given string. This is useful for cases where - * one chunk of code has generated a SqlString and another wants to insert - * that string (and parameters) into a new one. Since the parameter numbers - * are baked into the SQL text, the only way this can work is if the - * existing SqlString's parameters come first. The caller is responsible - * for joining the SQL text. - */ - // TODO-coverage - pub fn new_with_params(s: SqlString<'a>) -> (SqlString<'a>, String) { - let old_sql = s.sql_fragment().to_owned(); - let new_str = SqlString { contents: String::new(), params: s.params }; - (new_str, old_sql) - } - - /** - * Append `s` to the end of the SQL string being constructed - * - * `s` is not validated or escaped in any way. The caller is responsible - * for sending any untrusted input through `next_param()` and using only the - * identifier returned by `next_param()` in this string. - */ - pub fn push_str(&mut self, s: &str) { - self.contents.push_str(s) - } - - /** - * Allocate a new parameter with value `v`, returning an identifier name - * that can be inserted into the SQL string wherever you want to use the - * parameter. - */ - pub fn next_param<'b>(&'b mut self, v: &'a (dyn ToSql + Sync)) -> String - where - 'a: 'b, - { - self.params.push(v); - format!("${}", self.params.len()) - } - - /** Return the constructed SQL fragment. */ - pub fn sql_fragment(&self) -> &str { - &self.contents - } - - /** Return the parameters allocated by calls to `next_param()`. 
*/ - pub fn sql_params(&self) -> &[&'a (dyn ToSql + Sync)] { - &self.params - } -} - -/** - * Build up a list of SQL name-value pairs - * - * This struct stores names and corresponding SQL values and provides a way to - * get them back out suitable for use in safe INSERT or UPDATE statements. For - * both INSERT and UPDATE, the values are provided as parameters to the query. - * - * Like the other interfaces here, the names here must be `&'static str` to make - * it harder to accidentally supply user input here. That's important because - * column names cannot be passed as parameters for the query. - * - * There's nothing that ensures that the types of the values correspond to - * their types in the database. Even if we statically checked this, we would - * only be checking that the values correspond with some Rust representation of - * the database schema that we've built into this program. That does not - * eliminate the runtime possibility that the types do not, in fact, match the - * types in the database. - */ -pub struct SqlValueSet { - names: Vec<&'static str>, - values: Vec>, - names_unique: BTreeSet<&'static str>, -} - -impl SqlValueSet { - pub fn new() -> SqlValueSet { - SqlValueSet { - names: Vec::new(), - values: Vec::new(), - names_unique: BTreeSet::new(), - } - } - - /* - * TODO-design Can we do better (avoiding Clone) using tokio_postgres's - * query_raw()? - */ - /// Set column `name` to value `value` - pub fn set(&mut self, name: &'static str, value: &T) - where - T: ToSql + Send + Sync + Clone + 'static, - { - assert!( - self.names_unique.insert(name), - "duplicate name specified for SqlValueSet" - ); - self.names.push(name); - self.values.push(Box::new((*value).clone())); - } - - /// Return the names for all values in this set - pub fn names(&self) -> &[&'static str] { - &self.names - } - - /// Return the values in this set (without their names) - pub fn values(&self) -> Vec<&(dyn ToSql + Send + Sync)> { - self.values.iter().map(|b| b.as_ref()).collect() - } - - /// - /// Construct a SQL fragment representing an UPDATE of the values in this - /// set. For example: - /// - /// ``` - /// # use omicron_nexus::db::SqlValueSet; - /// # use omicron_nexus::db::SqlString; - /// let mut values = SqlValueSet::new(); - /// values.set("name", &"hello"); - /// values.set("gen", &123i64); - /// let mut sql = SqlString::new(); - /// values.to_update_sql(&mut sql); - /// assert_eq!( - /// sql.sql_fragment(), - /// "name = $1, gen = $2" - /// ); - /// // Parameter $1 will be the string "hello" - /// // Parameter $2 will be the int8 with value 123 - /// ``` - /// - /// This could be safely used to construct a query like: - /// - /// ```ignore - /// UPDATE my_table SET name = $1, gen = $2 WHERE ... - /// ^^^^^^^^^^^^^^^^^^^ - /// | - /// +--- generated by this function - /// ``` - /// - // TODO-coverage - pub fn to_update_sql<'a, 'b>(&'a self, output: &'b mut SqlString<'a>) - where - 'a: 'b, - { - let set_parts = self - .names() - .iter() - .zip(self.values().iter()) - .map( - |(name, value): ( - &&'static str, - &&(dyn ToSql + Send + Sync), - )| { - assert!(valid_cockroachdb_identifier(*name)); - format!("{} = {}", *name, output.next_param(*value)) - }, - ) - .collect::>(); - output.push_str(&set_parts.join(", ")); - } -} - -/* TODO-doc */ -pub trait SqlSerialize { - fn sql_serialize(&self, output: &mut SqlValueSet); -} - -/** Describes a table in the control plane database */ -/* - * TODO-design We want to find a better way to abstract this. 
Diesel provides a - * compelling model in terms of using it. But it also seems fairly heavyweight - * and seems to tightly couple the application to the current database schema. - * This pattern of fetch-or-insert all-fields-of-an-object likely _isn't_ our - * most common use case, even though we do it a lot for basic CRUD. - * TODO-robustness it would also be great if this were generated from - * src/sql/dbinit.sql or vice versa. There is at least a test that verifies - * they're in sync. - */ -pub trait Table { - /** Struct that represents rows of this table when the full row is needed */ - /* TODO-cleanup what does the 'static actually mean here? */ - type Model: for<'a> TryFrom<&'a tokio_postgres::Row, Error = Error> - + Send - + 'static; - /** Name of the table */ - const TABLE_NAME: &'static str; - /** List of names of all columns in the table. */ - const ALL_COLUMNS: &'static [&'static str]; - - /** - * Parts of a WHERE clause that should be included in all queries for live - * records - */ - /* - * TODO-design This really ought to be a default only for ResourceTable. As - * it is, it's easy to accidentally leave this on non-ResourceTable Tables. - * The reverse, though, would make it easy to leave it off ResourceTables. - */ - const LIVE_CONDITIONS: &'static str = "time_deleted IS NULL"; -} - -/** - * Describes a [`Table`] whose rows represent HTTP resources in the public API - * - * Among other things, lookups from these tables produces suitable `Error`s - * when an object is not found. - */ -pub trait ResourceTable: Table { - /* - * TODO-cleanup can we remove the RESOURCE_TYPE here? And if so, can we - * make this totally agnostic to the control plane? - */ - /** [`ResourceType`] that corresponds to rows of this table */ - const RESOURCE_TYPE: ResourceType; -} - -/** - * Used to generate WHERE clauses for individual row lookups and paginated scans - * - * Impls of `LookupKey` describe the various queries that are used to select - * rows from the database. - * - * ## Looking up an object - * - * There are three common ways to identify an object: - * - * * by its id, which is universally unique. Any object can be looked up by - * its id. This is implemented using [`super::schema::LookupByUniqueId`]. - * * by a name that is unique within the whole control plane. Organizations - * have a unique name, for example, since they do not exist within any - * other scope. This is implemented using - * [`super::schema::LookupByUniqueName`]. - * * by a name that is unique within the scope of a parent object, which - * itself has a unique id. For example, Instances have names that are - * unique within their Project, so you can look up any Instance using the - * Project's id and the Instance's name. This is implemented using - * [`super::schema::LookupByUniqueNameInProject`]. - * - * Other lookups are possible as well. For example, - * [`super::schema::LookupByAttachedInstance`] specifically looks up disks based - * on the instance that they're attached to. - * - * - * ## Parts of a `LookupKey` - * - * Implementations of `LookupKey` define a _scope key_ and an _item key_. For - * example, Disks have a "name" that is unique within a Project. For a Disk, - * the usual scope key is a project id, and the usual item key is the name of - * the disk. 
That said, nothing requires that a row have only one way to look - * it up -- and indeed, resources like Disks can be looked up by unique id alone - * (null scope, id as item key) as well as by its name within a project - * (project_id scope, name as item key). - * - * The scope key can be a combination of values (corresponding to a combination - * of columns in the database). We use the [`IntoToSqlVec`] trait to turn a - * given scope key into a set of SQL values that we can insert into a query. - * - * The item key currently must be a single SQL value. It would be pretty easy - * to generalize this to a set of values just like the scope key. The challenge - * is only that item keys for pagination typically come from [`DataPageParams`], - * which assumes a single field. - * - * - * ## Pagination - * - * `LookupKey` supports both direct lookup of rows as described above as well as - * _pagination_: fetching the next rows in an ordered sequence (without knowing - * what the next row is). The main difference is that a direct lookup fetches a - * row _exactly_ matching the scope parameters and item key, while pagination - * fetches rows exactly matching the scope parameters and sorted immediately - * _after_ the given item key. - */ -pub trait LookupKey<'a, T: Table> { - /** Names of the database columns that make up the scope key */ - const SCOPE_KEY_COLUMN_NAMES: &'static [&'static str]; - - /** - * Rust type describing the scope key - * - * This must be convertible into a list of SQL parameters of the same length - * and sequence as the column names in - * [`LookupKey::SCOPE_KEY_COLUMN_NAMES`]. This is done using the - * [`IntoToSqlVec`] trait. - */ - type ScopeKey: IntoToSqlVec<'a> + 'a + Clone + Copy; - - /** Name of the database column storing the item key */ - const ITEM_KEY_COLUMN_NAME: &'static str; - - /** Rust type describing the item key */ - type ItemKey: ToSql + for<'f> FromSql<'f> + Sync + Clone + 'static; - - fn where_select_error( - scope_key: Self::ScopeKey, - item_key: &Self::ItemKey, - ) -> Error; - - /* - * The rest of this trait provides common implementation for all impls - * (in terms of the above items). - */ - - /** - * Appends to `output` a WHERE clause for selecting a specific row based on - * the given scope key and item key - * - * Typically we'll create a unique index on (scope key, item key) so that at - * most one row will match. However, the SQL generated here neither assumes - * nor guarantees that only one row will match. - */ - fn where_select_rows( - scope_key: Self::ScopeKey, - item_key: &'a Self::ItemKey, - output: &mut SqlString<'a>, - ) { - let mut column_names = - Vec::with_capacity(Self::SCOPE_KEY_COLUMN_NAMES.len() + 1); - column_names.extend_from_slice(&Self::SCOPE_KEY_COLUMN_NAMES); - column_names.push(Self::ITEM_KEY_COLUMN_NAME); - let mut param_values = scope_key.to_sql_vec(); - param_values.push(item_key); - where_cond(&column_names, ¶m_values, "=", output); - } - - /** - * Appends to `output` a a WHERE clause for selecting a page of rows - * - * A "page" here is a sequence of rows according to some sort order, as in - * API pagination. Callers of this function specify the page by specifying - * values from the last row they saw, not necessarily knowing exactly which - * row(s) are next. By contrast, [`LookupKey::where_select_rows`] (besides - * usually being used to select a only single row) does not assume anything - * about the ordering and expects callers to provide the item key of the row - * they want. 
- */ - fn where_select_page<'b, 'c>( - scope_key: Self::ScopeKey, - pagparams: &DataPageParams<'b, Self::ItemKey>, - output: &mut SqlString<'c>, - ) where - 'a: 'c, - 'b: 'c, - { - let (operator, order) = match pagparams.direction { - dropshot::PaginationOrder::Ascending => (">", "ASC"), - dropshot::PaginationOrder::Descending => ("<", "DESC"), - }; - - /* - * First, generate the conditions that are true for every page. For - * example, when listing Instances in a Project, this would specify the - * project_id. - */ - let fixed_column_names = Self::SCOPE_KEY_COLUMN_NAMES; - if fixed_column_names.len() > 0 { - let fixed_param_values = scope_key.to_sql_vec(); - output.push_str(" AND ("); - where_cond(fixed_column_names, &fixed_param_values, "=", output); - output.push_str(") "); - } - - /* - * If a marker was provided, then generate conditions that resume the - * scan after the marker value. - */ - if let Some(marker) = pagparams.marker { - let var_column_names = &[Self::ITEM_KEY_COLUMN_NAME]; - let marker_ref = marker as &(dyn ToSql + Sync); - let var_param_values = vec![marker_ref]; - output.push_str(" AND ("); - where_cond(var_column_names, &var_param_values, operator, output); - output.push_str(") "); - } - - /* - * Generate the ORDER BY clause based on the columns that make up the - * marker. - */ - output.push_str(&format!( - " ORDER BY {} {} ", - Self::ITEM_KEY_COLUMN_NAME, - order, - )); - } -} - -/** - * Defines a conversion for a type into a list of SQL parameters - * - * This is used for the scope key used within a [`LookupKey`]. Currently, all - * scope keys are tuples of 0 or 1 element so we define impls for those types. - */ -pub trait IntoToSqlVec<'a> { - fn to_sql_vec(self) -> Vec<&'a (dyn ToSql + Sync)>; -} - -impl<'a> IntoToSqlVec<'a> for () { - fn to_sql_vec(self) -> Vec<&'a (dyn ToSql + Sync)> { - Vec::new() - } -} - -impl<'a, 't1, T1> IntoToSqlVec<'a> for (&'t1 T1,) -where - T1: ToSql + Sync, - 't1: 'a, -{ - fn to_sql_vec(self) -> Vec<&'a (dyn ToSql + Sync)> { - vec![self.0] - } -} - -impl<'a, 't1, 't2, T1, T2> IntoToSqlVec<'a> for (&'t1 T1, &'t2 T2) -where - T1: ToSql + Sync, - 't1: 'a, - T2: ToSql + Sync, - 't2: 'a, -{ - fn to_sql_vec(self) -> Vec<&'a (dyn ToSql + Sync)> { - vec![self.0, self.1] - } -} - -/** - * Appends to `output` a SQL fragment specifying that each column in - * `column_names` match the corresponding value in `param_values` - * - * `operator` specifies which operator is used: `"="` would say that the values - * in returned rows must exactly match those in `param_values`. - * - * This function uses SQL parameters for all the of the values, so the caller - * need not escape the values or handle them specially in any way. - * - * The column names are required to be `&'static str` to make it more difficult - * to accidentally sneak user input into the query string (SQL injection). In - * practice, column names are typically hardcoded wherever they come from and - * they need to be passed between functions as `&'static str` to use them here. 
- * - * ## Example - * - * ``` - * # use omicron_nexus::db::sql::SqlString; - * # use omicron_nexus::db::sql::where_cond; - * use tokio_postgres::types::FromSql; - * # use tokio_postgres::types::IsNull; - * use tokio_postgres::types::ToSql; - * use tokio_postgres::types::Type; - * - * let column_names: &[&'static str] = &["city", "racks"]; - * let param_values: &[&(dyn ToSql + Sync)] = &[&"Ashburn", &123i64]; - * let mut output = SqlString::new(); - * where_cond(column_names, param_values, "=", &mut output); - * // The SQL string includes one condition for each column and value, - * // using parameters for the values. - * assert_eq!("( (city = $1) AND (racks = $2) )", output.sql_fragment()); - * let params = output.sql_params(); - * assert_eq!(params.len(), 2); - * - * // Parameter $1 will serialize to the SQL string `"Ashburn"` - * let mut bytes = bytes::BytesMut::new(); - * # let there = - * params[0].to_sql_checked(&Type::TEXT, &mut bytes); - * # assert!(matches!(there.unwrap(), IsNull::No)); - * let back = >::from_sql(&Type::TEXT, &bytes) - * .expect("failed to deserialize $1"); - * assert_eq!(back, "Ashburn"); - * - * // Parameter $2 will serialize to the SQL int `123` - * let mut bytes = bytes::BytesMut::new(); - * # let there = - * params[1].to_sql_checked(&Type::INT8, &mut bytes); - * # assert!(matches!(there.unwrap(), IsNull::No)); - * let back = >::from_sql(&Type::INT8, &bytes) - * .expect("failed to deserialize $2"); - * assert_eq!(back, 123); - * ``` - * - * ## Panics - * - * If `column_names.len() != param_values.len()`. - */ -/* - * NOTE: This function is public only so that we can access it from the example. - */ -pub fn where_cond<'a, 'b>( - column_names: &'b [&'static str], - param_values: &'b [&'a (dyn ToSql + Sync)], - operator: &'static str, - output: &'b mut SqlString<'a>, -) where - 'a: 'b, -{ - assert_eq!(param_values.len(), column_names.len()); - - if column_names.is_empty() { - output.push_str("TRUE"); - } else { - let conditions = column_names - .iter() - .zip(param_values.iter()) - .map(|(name, value): (&&'static str, &&(dyn ToSql + Sync))| { - assert!(valid_cockroachdb_identifier(name)); - format!( - "({} {} {})", - *name, - operator, - output.next_param(*value) - ) - }) - .collect::>() - .join(" AND "); - output.push_str(format!("( {} )", conditions).as_str()); - } -} - -/** - * Returns whether `name` is a valid CockroachDB identifier - * - * This is intended as a sanity-check, not an authoritative validator. - */ -pub fn valid_cockroachdb_identifier(name: &'static str) -> bool { - /* - * It would be nice if there were a supported library interface for this. - * Instead, we rely on the CockroachDB documentation on the syntax for - * identifiers. 
- */ - let mut iter = name.chars(); - let maybe_first = iter.next(); - if maybe_first.is_none() { - return false; - } - - let first = maybe_first.unwrap(); - if !first.is_alphabetic() && first != '_' { - return false; - } - - for c in iter { - if c != '_' && c != '$' && !c.is_alphanumeric() { - return false; - } - } - - return true; -} - -#[cfg(test)] -mod test { - use super::valid_cockroachdb_identifier; - use super::where_cond; - use super::SqlString; - use tokio_postgres::types::ToSql; - - #[test] - fn test_validate_identifier() { - assert!(valid_cockroachdb_identifier("abc")); - assert!(valid_cockroachdb_identifier("abc123")); - assert!(valid_cockroachdb_identifier("abc$123")); - assert!(valid_cockroachdb_identifier("abc_123")); - assert!(valid_cockroachdb_identifier("_abc_123")); - assert!(valid_cockroachdb_identifier("_abc_123$")); - - assert!(!valid_cockroachdb_identifier("")); - assert!(!valid_cockroachdb_identifier("ab\"cd")); - assert!(!valid_cockroachdb_identifier("1abc")); - assert!(!valid_cockroachdb_identifier("ab cd")); - assert!(!valid_cockroachdb_identifier("$abcd")); - } - - #[test] - fn test_where_cond() { - /* - * We tested a basic case in the example above. Here we test edge - * cases. - */ - let column_names: &[&'static str] = &["c1"]; - let param_values: &[&(dyn ToSql + Sync)] = &[&"v1"]; - /* List of length 1 */ - let mut output = SqlString::new(); - where_cond(column_names, ¶m_values, "=", &mut output); - assert_eq!("( (c1 = $1) )", output.sql_fragment()); - - /* Other operators besides "=" */ - let mut output = SqlString::new(); - where_cond(column_names, ¶m_values, ">=", &mut output); - assert_eq!("( (c1 >= $1) )", output.sql_fragment()); - let mut output = SqlString::new(); - where_cond(column_names, ¶m_values, "<", &mut output); - assert_eq!("( (c1 < $1) )", output.sql_fragment()); - - /* Zero-length list */ - let mut output = SqlString::new(); - where_cond(&[], &[], "=", &mut output); - assert_eq!("TRUE", output.sql_fragment()); - } - - #[test] - #[should_panic] - fn test_where_cond_bad_length() { - let column_names: &[&'static str] = &["c1"]; - let mut output = SqlString::new(); - where_cond(column_names, &[], "=", &mut output); - } - - #[test] - #[should_panic] - fn test_where_cond_bad_column() { - let column_names: &[&'static str] = &["c1\"c2"]; - let param_values: &[&(dyn ToSql + Sync)] = &[&"v1"]; - let mut output = SqlString::new(); - where_cond(column_names, param_values, "=", &mut output); - } - - /* TODO-coverage tests for SqlString */ - /* TODO-coverage tests for LookupKey */ -} diff --git a/omicron-nexus/src/db/sql_operations.rs b/omicron-nexus/src/db/sql_operations.rs deleted file mode 100644 index bfce389e17..0000000000 --- a/omicron-nexus/src/db/sql_operations.rs +++ /dev/null @@ -1,830 +0,0 @@ -/*! 
- * Facilities for executing complete SQL queries - */ - -use futures::StreamExt; -use omicron_common::api::external::DataPageParams; -use omicron_common::api::external::Error; -use omicron_common::api::external::ListResult; -use omicron_common::api::external::LookupResult; -use omicron_common::api::external::ResourceType; -use omicron_common::db::sql_error_generic; -use omicron_common::db::sql_row_value; -use omicron_common::db::DbError; -use std::convert::TryFrom; -use std::num::NonZeroU32; - -use super::operations::sql_execute; -use super::operations::sql_query; -use super::operations::sql_query_always_one; -use super::operations::sql_query_maybe_one; -use super::sql; -use super::sql::LookupKey; -use super::sql::ResourceTable; -use super::sql::SqlString; -use super::sql::SqlValueSet; -use super::sql::Table; - -/// Fetch parts of a row using the specified lookup -pub async fn sql_fetch_row_raw<'a, L, R>( - client: &tokio_postgres::Client, - scope_key: L::ScopeKey, - item_key: &'a L::ItemKey, - columns: &[&'static str], -) -> LookupResult -where - L: LookupKey<'a, R>, - R: ResourceTable, -{ - let mut lookup_cond_sql = SqlString::new(); - L::where_select_rows(scope_key, item_key, &mut lookup_cond_sql); - - let sql = format!( - "SELECT {} FROM {} WHERE ({}) AND ({}) LIMIT 2", - columns.join(", "), - R::TABLE_NAME, - R::LIVE_CONDITIONS, - &lookup_cond_sql.sql_fragment(), - ); - let query_params = lookup_cond_sql.sql_params(); - let mkzerror = move || L::where_select_error(scope_key, item_key); - sql_query_maybe_one(client, &sql, query_params, mkzerror).await -} - -/// Fetch an entire row using the specified lookup -pub async fn sql_fetch_row_by<'a, L, R>( - client: &tokio_postgres::Client, - scope_key: L::ScopeKey, - item_key: &'a L::ItemKey, -) -> LookupResult -where - L: LookupKey<'a, R>, - R: ResourceTable, -{ - let row = - sql_fetch_row_raw::(client, scope_key, item_key, R::ALL_COLUMNS) - .await?; - R::Model::try_from(&row) -} - -/// Fetch a page of rows from a table using the specified lookup -pub async fn sql_fetch_page_from_table<'a, L, T>( - client: &'a tokio_postgres::Client, - scope_key: L::ScopeKey, - pagparams: &'a DataPageParams<'a, L::ItemKey>, -) -> ListResult -where - L: LookupKey<'a, T>, - L::ScopeKey: 'a, - T: Table, -{ - sql_fetch_page_by::( - client, - scope_key, - pagparams, - T::ALL_COLUMNS, - ) - .await -} - -/// Like `fetch_page_from_table`, but the caller can specify which columns -/// to select and how to interpret the row -pub async fn sql_fetch_page_by<'a, L, T, R>( - client: &'a tokio_postgres::Client, - scope_key: L::ScopeKey, - pagparams: &'a DataPageParams<'a, L::ItemKey>, - columns: &'static [&'static str], -) -> ListResult -where - L: LookupKey<'a, T>, - T: Table, - R: for<'d> TryFrom<&'d tokio_postgres::Row, Error = Error> + Send + 'static, -{ - let mut s = SqlString::new(); - s.push_str("TRUE"); - let rows = - sql_fetch_page_raw::(client, scope_key, pagparams, columns, s) - .await?; - let list = rows.iter().map(R::try_from).collect::>>(); - Ok(futures::stream::iter(list).boxed()) -} - -/// Fetches a page of rows from a table `T` using the specified lookup `L`. The -/// page is identified by `pagparams`, which contains a marker value, a scan -/// direction, and a limit of rows to fetch. The caller can provide additional -/// constraints in SQL using `extra_cond_sql`. 
-async fn sql_fetch_page_raw<'a, 'b, L, T>( - client: &'a tokio_postgres::Client, - scope_key: L::ScopeKey, - pagparams: &DataPageParams<'b, L::ItemKey>, - columns: &'static [&'static str], - extra_cond_sql: SqlString<'b>, -) -> Result, Error> -where - L: LookupKey<'a, T>, - T: Table, - 'a: 'b, -{ - let (mut page_select_sql, extra_fragment) = - SqlString::new_with_params(extra_cond_sql); - L::where_select_page(scope_key, pagparams, &mut page_select_sql); - let limit = i64::from(pagparams.limit.get()); - let limit_clause = format!("LIMIT {}", page_select_sql.next_param(&limit)); - page_select_sql.push_str(limit_clause.as_str()); - - let sql = format!( - "SELECT {} FROM {} WHERE ({}) AND ({}) {}", - columns.join(", "), - T::TABLE_NAME, - extra_fragment, - T::LIVE_CONDITIONS, - &page_select_sql.sql_fragment(), - ); - let query_params = page_select_sql.sql_params(); - sql_query(client, sql.as_str(), query_params) - .await - .map_err(sql_error_generic) -} - -/// Fetch _all_ rows from table `T` that match SQL conditions `extra_cond`. -/// This function makes paginated requests using lookup `L`. Rows are returned -/// as a Vec of type `R`. -pub async fn sql_paginate<'a, L, T, R>( - client: &'a tokio_postgres::Client, - scope_key: L::ScopeKey, - columns: &'static [&'static str], - extra_cond: SqlString<'a>, -) -> Result, Error> -where - L: LookupKey<'a, T>, - T: Table, - R: for<'b> TryFrom<&'b tokio_postgres::Row, Error = Error> + Send + 'static, -{ - let mut results = Vec::new(); - let mut last_row_marker = None; - let direction = dropshot::PaginationOrder::Ascending; - let limit = NonZeroU32::new(100).unwrap(); - loop { - /* TODO-error more context here about what we were doing */ - let pagparams = DataPageParams { - marker: last_row_marker.as_ref(), - direction, - limit, - }; - let rows = sql_fetch_page_raw::( - client, - scope_key, - &pagparams, - columns, - extra_cond.clone(), - ) - .await?; - if rows.len() == 0 { - return Ok(results); - } - - /* - * Extract the marker column and prepare pagination parameters for the - * next query. - */ - let last_row = rows.last().unwrap(); - last_row_marker = Some(sql_row_value::<_, L::ItemKey>( - last_row, - L::ITEM_KEY_COLUMN_NAME, - )?); - - /* - * Now transform the rows we got and append them to the list we're - * accumulating. - */ - results.append( - &mut rows - .iter() - .map(R::try_from) - .collect::, Error>>()?, - ); - } -} - -/// -/// Describes a successful result of [`sql_update_precond`] -/// -pub struct UpdatePrecond { - /// Information about the requested row. This includes a number of columns - /// "found_$c" and "updated_$c" where "$c" is a column from the table. - /// These columns will include any columns required to find the row in the - /// first place (typically the "id") and any columns requested by the caller - /// in `precond_columns`. - pub found_state: tokio_postgres::Row, - - /// If true, the row was updated (the preconditions held). If not, the - /// preconditions were not true. - pub updated: bool, -} - -/// -/// Conditionally update a row in table `T` -/// -/// The row is identified using lookup `L` and the `scope_key` and `item_key`. -/// The values to be updated are provided by `update_values`. The preconditions -/// are specified by the SQL fragment `precond_sql`. -/// -/// Besides the usual database errors, there are a few common cases here: -/// -/// * The specified row was not found. In this case, a suitable -/// [`Error::ObjectNotFound`] error is returned. -/// -/// * The specified row was found and updated. 
In this case, an -/// [`UpdatePrecond`] is returned with `updated = true`. -/// -/// * The specified row was found, but it was not updated because the -/// preconditions did not hold. In this case, an [`UpdatePrecond`] is -/// returned with `updated = false`. -/// -/// On success (which includes the case where the row was not updated because -/// the precondition failed), the returned [`UpdatePrecond`] includes -/// information about the row that was found. -/// -/// ## Returns -/// -/// An [`UpdatePrecond`] that describes whether the row was updated and what -/// state was found, if any. -/// -// TODO-coverage -// TODO-log log for both cases here -pub async fn sql_update_precond<'a, 'b, T, L>( - client: &'b tokio_postgres::Client, - scope_key: L::ScopeKey, - item_key: &'a L::ItemKey, - precond_columns: &'b [&'static str], - update_values: &SqlValueSet, - precond_sql: SqlString<'a>, -) -> Result -where - T: Table, - L: LookupKey<'a, T>, -{ - /* - * We want to update a row only if it meets some preconditions. One - * traditional way to do that is to begin a transaction, SELECT [the row] - * FOR UPDATE, examine it here in the client, then decide whether to UPDATE - * it. The SELECT FOR UPDATE locks the row in the database until the - * transaction commits or aborts. This causes other clients that want to - * modify the row to block. This works okay, but it means locks are held - * and clients blocked for an entire round-trip to the database, plus - * processing time here. - * - * Another approach is to simply UPDATE the row and include the - * preconditions directly in the WHERE clause. This keeps database lock - * hold time to a minimum (no round trip with locks held). However, all you - * know after that is whether the row was updated. If it wasn't, you don't - * know if it's because the row wasn't present or it didn't meet the - * preconditions. - * - * One could begin a transaction, perform the UPDATE with preconditions in - * the WHERE clause, then SELECT the row, then COMMIT. If we could convince - * ourselves that the UPDATE and SELECT see the same snapshot of the data, - * this would be an improvement -- provided that we can send all four - * statements (BEGIN, UPDATE, SELECT, COMMIT) as a single batch and still - * see the results of the UPDATE and SELECT. It's not clear if this is - * possible using the PostgreSQL wire protocol, but it doesn't seem to be - * exposed from the tokio_postgres client. If these aren't sent in one - * batch, you have the same problem as above: locks held for an entire - * round-trip. - * - * While somewhat complicated at first glance, the SQL below allows us to - * correctly perform the update, minimize lock hold time, and still - * distinguish the cases we're interested in: successful update, failure - * because the row was in the wrong state, failure because the row doesn't - * exist - * - * The query starts with a CTE that SELECTs the row we're going to update. - * This includes the identifying columns plus any extra columns that the - * caller asked for. (These are usually the columns associated with the - * preconditions.) For example: - * - * SELECT id, state FROM Instance WHERE id = $1 - * ^^^ ^^^ ^^ - * | | - * | +---- "extra" columns (precondition columns) - * | - * +--------- "identifying" columns (can be several) - * (scope key + item key columns) - * - * In our example, the update will be conditional on the existing value - * of "state". 
- */ - let identifying_columns = { - let mut columns = L::SCOPE_KEY_COLUMN_NAMES.to_vec(); - columns.push(L::ITEM_KEY_COLUMN_NAME); - columns - }; - - let mut selected_columns = identifying_columns.clone(); - selected_columns.extend_from_slice(precond_columns); - for column in &selected_columns { - assert!(sql::valid_cockroachdb_identifier(*column)); - } - assert!(sql::valid_cockroachdb_identifier(T::TABLE_NAME)); - - /* - * Construct the SQL string for the "SELECT" CTE. We use - * SqlString::new_with_params() to construct a new SqlString whose first - * parameter will be after the last parameter from the caller-provided - * "precond_sql". This is important, since we'll be sending this all - * over as one query, including both the SQL we're generating here and - * what the caller has constructed, and the latter already has the - * caller's parameter numbers baked into it. - */ - let (mut lookup_cond_sql, precond_sql_str) = - SqlString::new_with_params(precond_sql); - L::where_select_rows(scope_key, item_key, &mut lookup_cond_sql); - let select_sql_str = format!( - "SELECT {} FROM {} WHERE ({}) AND ({}) LIMIT 2", - selected_columns.join(", "), - T::TABLE_NAME, - T::LIVE_CONDITIONS, - lookup_cond_sql.sql_fragment(), - ); - - /* - * The second CTE will be an UPDATE that conditionally updates the row that - * we found with the SELECT. In our example, it would look like this: - * - * UPDATE Instance - * +--> SET state = "running" - * | WHERE id = $1 AND state = "starting" - * | ^^^^^^^ ^^^^^^^^^^^^^^^^^ - * | | | - * | | +--- caller-provided precondition SQL - * | | (may include parameters!) - * | | - * | +--- Same as "SELECT" clause above - * | - * +------ "SET" clause constructed from the caller-provided - * "update_values", a SqlValueSet - * - * The WHERE clause looks just like the above SELECT's WHERE clause, - * plus the caller's preconditions. As before, we'll use - * SqlString::new_with_params() to start a new SqlString with parameters - * that start after the ones we've already assembled so far. - */ - let (mut update_set_sql, lookup_cond_str) = - SqlString::new_with_params(lookup_cond_sql); - update_values.to_update_sql(&mut update_set_sql); - let update_sql_str = format!( - "UPDATE {} SET {} WHERE ({}) AND ({}) AND ({}) LIMIT 2 RETURNING {}", - T::TABLE_NAME, - update_set_sql.sql_fragment(), - T::LIVE_CONDITIONS, - lookup_cond_str.as_str(), - precond_sql_str, - selected_columns.join(", "), - ); - - /* - * Put it all together. The result will look like this: - * - * WITH found_rows AS ( /* the above select part */ ) - * updated_rows AS ( /* the above update part */ ) - * SELECT - * found.id AS found_id, <--- identifying columns - * found.state AS found_state, <--- initial values for "extra" - * columns - * updated.id AS updated_id, <--- identifying columns - * updated.state AS updated_state, - * FROM - * found_rows found <-- result of the "SELECT" CTE - * FULL OUTER JOIN - * updated_rows updated <-- result of the "UPDATE" CTE - * ON - * found.id = updated.id <-- identifying columns - * - * Now, both the SELECT and UPDATE have "LIMIT 2", which means the FULL - * OUTER JOIN cannot produce very many rows. In practice, we expect the - * SELECT and UPDATE to find at most one row. The FULL OUTER JOIN just - * allows us to identify state inconsistency (e.g., multiple rows with - * the same id values). - * - * There will be no new parameters in the SQL we're generating now. We - * will need to provide the parameters from the various subparts above. 
- */ - let select_columns = identifying_columns - .iter() - .chain(precond_columns.iter()) - .map(|name: &&'static str| { - vec![ - format!("found.{} AS found_{}", *name, *name), - format!("updated.{} AS updated_{}", *name, *name), - ] - }) - .flatten() - .collect::>(); - let join_parts = identifying_columns - .iter() - .map(|name: &&'static str| { - format!("(found.{} = updated.{})", *name, *name) - }) - .collect::>(); - let sql = format!( - "WITH found_rows AS ({}), \ - updated_rows AS ({}) \ - SELECT {} FROM \ - found_rows found \ - FULL OUTER JOIN \ - updated_rows updated \ - ON \ - {}", - select_sql_str, - update_sql_str, - select_columns.join(", "), - join_parts.join(" AND "), - ); - - /* - * There are only three expected results of this query: - * - * (1) The row that we tried to update does not exist, which is true iff - * there were zero returned rows. In this case, our `mkzerror` function - * will be used to generate an ObjectNotFound error. - * - * (2) We found exactly one row and updated it. This is true iff there is - * one row with a non-null "updated_id" (where "id" is actually any of - * the identifying columns above). - * - * (3) There was exactly one Instance, but we did not update it because - * it failed the precondition. This is true iff there is one row and - * its "updated_id" is null. (Again, "id" here can be any of the - * identifying columns.) - * - * A lot of other things are operationally conceivable (like more than one - * returned row), but they should not be possible. We treat these as - * internal errors. - */ - /* - * TODO-correctness Is this a case where the caller may have already done a - * name-to-id translation? If so, it might be confusing if we produce a - * NotFound based on the id, since that's not what the end user actually - * provided. Maybe the caller should translate a NotFound-by-id to a - * NotFound-by-name if they get this error. It sounds kind of cheesy but - * only the caller knows this translation has been done. - */ - let mkzerror = || L::where_select_error(scope_key, item_key); - let row = sql_query_maybe_one( - &client, - sql.as_str(), - update_set_sql.sql_params(), - mkzerror, - ) - .await?; - - /* - * We successfully updated the row iff the "updated_*" columns for the - * identifying columns are non-NULL. We pick the lookup_key column, - * purely out of convenience. - */ - let updated_key = format!("updated_{}", L::ITEM_KEY_COLUMN_NAME); - let check_updated_key: Option = - sql_row_value(&row, updated_key.as_str())?; - Ok(UpdatePrecond { found_state: row, updated: check_updated_key.is_some() }) -} - -/** - * Given a [`DbError`] while creating an instance of type `rtype`, produce an - * appropriate [`Error`] - * - * This looks for the specific case of a uniqueness constraint violation and - * reports a suitable [`Error::ObjectAlreadyExists`] for it. Otherwise, we - * fall back to [`sql_error_generic`]. - */ -fn sql_error_on_create( - rtype: ResourceType, - unique_value: &str, - e: DbError, -) -> Error { - if let Some(code) = e.db_source().and_then(|s| s.code()) { - if *code == tokio_postgres::error::SqlState::UNIQUE_VIOLATION { - /* - * TODO-debuggability it would be nice to preserve the DbError here - * so that the SQL is visible in the log. - */ - return Error::ObjectAlreadyExists { - type_name: rtype, - object_name: unique_value.to_owned(), - }; - } - } - - sql_error_generic(e) -} - -/** - * Insert a row into table `T` and return the resulting row - * - * This function expects any conflict error is on the name. 
The caller should - * provide this in `unique_value` and it will be returned with an - * [`Error::ObjectAlreadyExists`] error. - */ -pub async fn sql_insert_unique( - client: &tokio_postgres::Client, - values: &SqlValueSet, - unique_value: &str, -) -> Result -where - R: ResourceTable, -{ - let mut sql = SqlString::new(); - let param_names = values - .values() - .iter() - .map(|value| sql.next_param(*value)) - .collect::>(); - let column_names = values.names().iter().cloned().collect::>(); - - sql.push_str( - format!( - "INSERT INTO {} ({}) VALUES ({}) RETURNING {}", - R::TABLE_NAME, - column_names.join(", "), - param_names.join(", "), - R::ALL_COLUMNS.join(", "), - ) - .as_str(), - ); - - let row = - sql_query_always_one(client, sql.sql_fragment(), sql.sql_params()) - .await - .map_err(|e| { - sql_error_on_create(R::RESOURCE_TYPE, unique_value, e) - })?; - - R::Model::try_from(&row) -} - -/// -/// Insert a database record, not necessarily idempotently -/// -/// This is used when the caller wants to explicitly handle the case of a -/// conflict or else expects that a conflict will never happen and wants to -/// treat it as an unhandleable operational error. -/// -pub async fn sql_insert( - client: &tokio_postgres::Client, - values: &SqlValueSet, -) -> Result<(), Error> -where - T: Table, -{ - let mut sql = SqlString::new(); - let param_names = values - .values() - .iter() - .map(|value| sql.next_param(*value)) - .collect::>(); - let column_names = values.names().iter().cloned().collect::>(); - - sql.push_str( - format!( - "INSERT INTO {} ({}) VALUES ({})", - T::TABLE_NAME, - column_names.join(", "), - param_names.join(", "), - ) - .as_str(), - ); - - sql_execute(client, sql.sql_fragment(), sql.sql_params()) - .await - .map(|_| ()) - .map_err(sql_error_generic) -} - -/// -/// Idempotently insert a database record. This is intended for cases where the -/// record may conflict with an existing record with the same id, which should -/// be ignored, or with an existing record with the same name, which should -/// produce an [`Error::ObjectAlreadyExists`] error. -/// -/// This function is intended for use (indirectly) by sagas that want to create -/// a record. Most sagas also want the contents of the record they inserted. -/// For that, see [`sql_insert_unique_idempotent_and_fetch`]. -/// -/// The contents of the row are given by the key-value pairs in `values`. -/// -/// The name of the column on which conflicts should be ignored is given by -/// `ignore_conflicts_on`. Typically, `ignore_conflicts_on` would be `"id"`. -/// It must be a `&'static str` to make it hard to accidentally provide -/// untrusted input here. -/// -/// In the event of a conflict on name, `unique_value` should contain the name -/// that the caller is trying to insert. This is provided in the returned -/// [`Error::ObjectAlreadyExists`] error. 
-/// -pub async fn sql_insert_unique_idempotent<'a, R>( - client: &'a tokio_postgres::Client, - values: &'a SqlValueSet, - unique_value: &'a str, - ignore_conflicts_on: &'static str, -) -> Result<(), Error> -where - R: ResourceTable, -{ - let mut sql = SqlString::new(); - let param_names = values - .values() - .iter() - .map(|value| sql.next_param(*value)) - .collect::>(); - let column_names = values.names().iter().cloned().collect::>(); - - sql.push_str( - format!( - "INSERT INTO {} ({}) VALUES ({}) ON CONFLICT ({}) DO NOTHING", - R::TABLE_NAME, - column_names.join(", "), - param_names.join(", "), - ignore_conflicts_on, - ) - .as_str(), - ); - - sql_execute(client, sql.sql_fragment(), sql.sql_params()) - .await - .map_err(|e| sql_error_on_create(R::RESOURCE_TYPE, unique_value, e)) - .map(|_| ()) -} - -/// -/// Idempotently insert a database record and fetch it back -/// -/// This function is intended for use (indirectly) by sagas that want to create a -/// record. It's essentially [`sql_insert_unique_idempotent`] followed by -/// [`sql_fetch_row_by`]. This first attempts to insert the specified record. -/// If the record is successfully inserted, the record is fetched again (using -/// `scope_key` and `item_key`) and returned. If the insert fails because a -/// record exists with the same id (whatever column is identified by -/// `ignore_conflicts_on`), no changes are made and the existing record is -/// returned. -/// -/// The insert + fetch sequence is not atomic. It's conceivable that another -/// client could modify the record in between. The caller is responsible for -/// ensuring that doesn't happen or does not matter for their purposes. In -/// practice, the surrounding saga framework should ensure that this doesn't -/// happen. -/// -// -// It's worth describing this process in more detail from the perspective of our -// caller. We'll consider the case of a saga that creates a new Instance. -// TODO This might belong in docs for the saga instead. -// -// -// EASY CASE -// -// If we do a simple INSERT, and that succeeds -- great. That's the common case -// and it's easy. -// -// -// ERROR: CONFLICT ON INSTANCE ID -// -// What if we get a conflict error on the instance id? Since the id is unique -// to this saga, we must assume that means that we're being invoked a second -// time after a previous one successfully updated the database. That means one -// of two things: (1) a previous database update succeeded, but something went -// wrong (e.g., the SEC crashed) before the saga could persistently record that -// fact; or (2) a second instance of the action is concurrently running with -// this one and already completed its database update. ((2) is only possible -// if, after evaluating use cases like this, we decide that it's okay to _allow_ -// two instances of the same action to run concurrently in the case of a -// partition among SECs or the like. We would only do that if it's clear that -// the action implementations can always handle this.) -// -// We opt to handle this by ignoring the conflict and always fetching the -// corresponding row in the table. In the common case, we'll find our own row. -// In the conflict case, we'll find the pre-existing row. In that latter case, -// the caller will verify that it looks the way we expect it to look (same -// "name", parameters, generation number, etc.). It should always match what we -// would have inserted. Having verified that, we can simply act as though we -// inserted it ourselves. 
In both cases (1) and (2) above, this action will -// complete successfully and the saga can proceed. In (2), the SEC may have to -// deal with the fact that the same action completed twice, but it doesn't have -// to _do_ anything about it (i.e., invoke an undo action). That said, if a -// second SEC did decide to unwind the saga and undo this action, things get -// more complicated. It would be nice if the framework guaranteed that (2) -// wasn't the case. -// -// -// ERROR: CONFLICT ON INSTANCE ID, MISSING ROW -// -// What if we get a conflict, fetch the corresponding row, and find none? We -// should make this impossible in a correct system. The only two things that -// might plausibly do this are (A) the undo action for this action, and (B) an -// actual instance delete operation. We can disallow (B) by not allowing -// anything to delete an instance whose create saga never finished (indicated by -// the state in the Instance row). Is there any implementation of sagas that -// would allow us to wind up in case (A)? Again, if SECs went split-brain -// because one of them got to this point in the saga, hit the easy case, then -// became partitioned from the rest of the world, and then a second SEC picked -// up the saga and reran this action, and then the first saga encountered some -// _other_ failure that triggered a saga unwind, resulting in the undo action -// for this step completing, and then the second SEC winds up in this state. -// One way to avoid this would be to have the undo action, rather than deleting -// the instance record, marking the row with a state indicating it is dead (or -// maybe never even alive). Something needs to clean these records up, but -// that's needed anyway for any sort of deleted record. -// -// -// COMBINING THE TWO QUERIES -// -// Since the case where we successfully insert the record is expected to be so -// common, we could use a RETURNING clause and only do a subsequent SELECT on -// conflict. This would bifurcate the code paths in a way that means we'll -// almost never wind up testing the conflict path. We could add this -// optimization if/when we find that the extra query is a problem. -// -// Relatedly, we could put the INSERT and SELECT queries into a transaction. -// This isn't necessary for our purposes. -// -// -// PROPOSED APPROACH -// -// Putting this all together, we arrive at the approach here: -// -// ACTION: -// - INSERT INTO Instance ... ON CONFLICT (id) DO NOTHING. -// - SELECT * from Instance WHERE id = ... -// -// UNDO ACTION: -// - UPDATE Instance SET state = deleted AND time_deleted = ... WHERE -// id = ...; -// (this will update 0 or 1 row, and it doesn't matter to us which) -// -// There are two more considerations: -// -// Does this work? Recall that sagas say that the effect of the sequence: -// -// start action (1) -// start + finish action (2) -// start + finish undo action -// finish action (1) -// -// must be the same as if action (1) had never happened. This is true of the -// proposed approach. That is, no matter what parts of the "action (1)" happen -// before or after the undo action finishes, the net result on the _database_ -// will be as if "action (1)" had never occurred. -// -// -// ERROR: CONFLICT ON NAME -// -// All of the above describes what happens when there's a conflict on "id", -// which is unique to this saga. It's also possible that there would be a -// conflict on the unique (project_id, name) index. 
In that case, we just want -// to fail right away -- the whole saga is going to fail with a user error. -// -pub async fn sql_insert_unique_idempotent_and_fetch<'a, R, L>( - client: &'a tokio_postgres::Client, - values: &'a SqlValueSet, - unique_value: &'a str, - ignore_conflicts_on: &'static str, - scope_key: L::ScopeKey, - item_key: &'a L::ItemKey, -) -> LookupResult -where - R: ResourceTable, - L: LookupKey<'a, R>, -{ - sql_insert_unique_idempotent::( - client, - values, - unique_value, - ignore_conflicts_on, - ) - .await?; - - /* - * If we get here, then we successfully inserted the record. It would - * be a bug if something else were to remove that record before we have - * a chance to fetch it. (That's not generally true -- just something - * that must be true for this function to be correct, and is true in the - * cases of our callers.) - */ - sql_fetch_row_by::(client, scope_key, item_key) - .await - .map_err(sql_error_not_missing) -} - -/// -/// Verifies that the given error is _not_ an `Object::ObjectNotFound`. -/// If it is, then it's converted to an `Error::InternalError` instead, -/// on the assumption that an `ObjectNotFound` in this context is neither -/// expected nor appropriate for the caller. -/// -fn sql_error_not_missing(e: Error) -> Error { - match e { - e @ Error::ObjectNotFound { .. } => Error::internal_error(&format!( - "unexpected ObjectNotFound: {:#}", - e - )), - e => e, - } -} diff --git a/omicron-nexus/src/db/update_and_check.rs b/omicron-nexus/src/db/update_and_check.rs new file mode 100644 index 0000000000..700a26bad2 --- /dev/null +++ b/omicron-nexus/src/db/update_and_check.rs @@ -0,0 +1,192 @@ +//! CTE implementation for "UPDATE with extended return status". + +use async_bb8_diesel::{AsyncRunQueryDsl, DieselConnectionManager}; +use diesel::associations::HasTable; +use diesel::helper_types::*; +use diesel::pg::Pg; +use diesel::prelude::*; +use diesel::query_builder::*; +use diesel::query_dsl::methods::LoadQuery; +use diesel::query_source::Table; +use diesel::sql_types::Nullable; + +/// Wrapper around [`diesel::update`] for a Table, which allows +/// callers to distinguish between "not found", "found but not updated", and +/// "updated". +/// +/// T: Table on which the UpdateAndCheck should be applied. +/// K: Primary Key type. +/// U: Where clause of the update statement. +/// V: Changeset to be applied to the update statement. +pub trait UpdateAndCheck { + /// Nests the existing update statement in a CTE which + /// identifies if the row exists (by ID), even if the row + /// cannot be successfully updated. + fn check_if_exists(self, key: K) -> UpdateAndQueryStatement; +} + +// UpdateStatement has four generic parameters: +// - T: Table which is being updated +// - U: Where clause +// - V: Changeset to be applied (default = SetNotCalled) +// - Ret: Returning clause (default = NoReturningClause) +// +// As currently implemented, we only define "UpdateAndCheck" for +// UpdateStatements using the default "Ret" value. This means +// the UpdateAndCheck methods can only be invoked for update statements +// to which a "returning" clause has not yet been added. +// +// This allows our implementation of the CTE to overwrite +// the return behavior of the SQL statement. +impl UpdateAndCheck for UpdateStatement { + fn check_if_exists(self, key: K) -> UpdateAndQueryStatement { + UpdateAndQueryStatement { update_statement: self, key } + } +} + +/// An UPDATE statement which can be combined (via a CTE) +/// with other statements to also SELECT a row. 
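Before the statement type itself (defined immediately below), a hedged sketch of how a caller is expected to drive this CTE wrapper. `dsl::instance`, `new_runtime`, `Instance`, `instance_id`, and `pool` are stand-ins for the real Nexus tables, changesets, models, and connection pool, not code from this patch:

```rust
// Illustrative usage only; names are assumptions.
let result = diesel::update(dsl::instance)
    .filter(dsl::time_deleted.is_null())
    .filter(dsl::id.eq(instance_id))
    .set(new_runtime)
    .check_if_exists(instance_id)
    .execute_and_check::<Instance>(&pool)
    .await?;

match result.status {
    // The WHERE clause matched, so the row was updated.
    UpdateStatus::Updated => { /* proceed */ }
    // The row with this primary key exists, but the WHERE clause filtered
    // it out, so nothing was updated.
    UpdateStatus::NotUpdatedButExists => { /* inspect result.found */ }
}
```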
+#[derive(Debug, Clone, Copy)]
+#[must_use = "Queries must be executed"]
+pub struct UpdateAndQueryStatement<T, K, U, V> {
+    update_statement: UpdateStatement<T, U, V>,
+    key: K,
+}
+
+impl<T, K, U, V> QueryId for UpdateAndQueryStatement<T, K, U, V> {
+    type QueryId = ();
+    const HAS_STATIC_QUERY_ID: bool = false;
+}
+
+/// Result of [`UpdateAndQueryStatement`].
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct UpdateAndQueryResult<Q> {
+    pub status: UpdateStatus,
+    pub found: Q,
+}
+
+/// Status of [`UpdateAndQueryResult`].
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum UpdateStatus {
+    /// The row exists and was updated.
+    Updated,
+    /// The row exists, but was not updated.
+    NotUpdatedButExists,
+}
+
+// Representation of Primary Key in Rust.
+type PrimaryKey<T> = <T as Table>::PrimaryKey;
+// Representation of Primary Key in SQL.
+type SerializedPrimaryKey<T> = <PrimaryKey<T> as diesel::Expression>::SqlType;
+
+impl<T, K, U, V> UpdateAndQueryStatement<T, K, U, V>
+where
+    K: 'static + PartialEq + Send,
+    T: 'static + Table + Send,
+    U: 'static + Send,
+    V: 'static + Send,
+{
+    /// Issues the CTE and parses the result.
+    ///
+    /// The three outcomes are:
+    /// - Ok(Row exists and was updated)
+    /// - Ok(Row exists, but was not updated)
+    /// - Error (row doesn't exist, or other diesel error)
+    pub async fn execute_and_check<Q>(
+        self,
+        pool: &bb8::Pool<DieselConnectionManager<PgConnection>>,
+    ) -> Result<UpdateAndQueryResult<Q>, diesel::result::Error>
+    where
+        Q: Queryable<T::SqlType, Pg> + std::fmt::Debug + Send + 'static,
+        Self: LoadQuery<PgConnection, (Option<K>, Option<K>, Q)>,
+    {
+        let (id0, id1, found) =
+            self.get_result_async::<(Option<K>, Option<K>, Q)>(pool).await?;
+        let status = if id0 == id1 {
+            UpdateStatus::Updated
+        } else {
+            UpdateStatus::NotUpdatedButExists
+        };
+
+        Ok(UpdateAndQueryResult { status, found })
+    }
+}
+
+impl<T, K, U, V> Query for UpdateAndQueryStatement<T, K, U, V>
+where
+    T: Table,
+{
+    type SqlType = (
+        Nullable<SerializedPrimaryKey<T>>,
+        Nullable<SerializedPrimaryKey<T>>,
+        T::SqlType,
+    );
+}
+
+impl<T, K, U, V> RunQueryDsl<PgConnection>
+    for UpdateAndQueryStatement<T, K, U, V>
+where
+    T: Table,
+{
+}
+
+/// This implementation uses the following CTE:
+///
+/// ```text
+/// // WITH found   AS (SELECT <primary key> FROM T WHERE <primary key matches>)
+/// //      updated AS (UPDATE T SET <constraints> RETURNING *)
+/// // SELECT
+/// //        found.<primary key>
+/// //        updated.<primary key>
+/// //        found.*
+/// // FROM
+/// //        found
+/// // LEFT JOIN
+/// //        updated
+/// // ON
+/// //        found.<primary key> = updated.<primary key>;
+/// ```
+impl<T, K, U, V> QueryFragment<Pg> for UpdateAndQueryStatement<T, K, U, V>
+where
+    T: HasTable<Table = T> + Table + diesel::query_dsl::methods::FindDsl<K>,
+    K: Copy,
+    Find<T, K>: QueryFragment<Pg>,
+    PrimaryKey<T>: diesel::Column,
+    UpdateStatement<T, U, V>: QueryFragment<Pg>,
+{
+    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
+        out.push_sql("WITH found AS (");
+        let subquery = T::table().find(self.key);
+        subquery.walk_ast(out.reborrow())?;
+        out.push_sql("), updated AS (");
+        self.update_statement.walk_ast(out.reborrow())?;
+        // TODO: Only need primary? Or would we actually want
+        // to pass the returned rows back through the result?
+        out.push_sql(" RETURNING *) ");
+
+        out.push_sql("SELECT");
+
+        let name = <T::PrimaryKey as diesel::Column>::NAME;
+        out.push_sql(" found.");
+        out.push_identifier(name)?;
+        out.push_sql(", updated.");
+        out.push_identifier(name)?;
+        // TODO: I'd prefer to list all columns explicitly. But how?
+        // The types exist within Table::AllColumns, and each one
+        // has a name as "<C as Column>::Name".
+        // But Table::AllColumns is a tuple, which makes iteration
+        // a pain.
+        //
+        // TODO: Technically, we're repeating the PK here.
+ out.push_sql(", found.*"); + + out.push_sql(" FROM found LEFT JOIN updated ON"); + out.push_sql(" found."); + out.push_identifier(name)?; + out.push_sql(" = "); + out.push_sql("updated."); + out.push_identifier(name)?; + + Ok(()) + } +} diff --git a/omicron-nexus/src/http_entrypoints_external.rs b/omicron-nexus/src/http_entrypoints_external.rs index cebc3c1e38..1de7b49034 100644 --- a/omicron-nexus/src/http_entrypoints_external.rs +++ b/omicron-nexus/src/http_entrypoints_external.rs @@ -163,7 +163,7 @@ async fn projects_get( let params = ScanByNameOrId::from_query(&query)?; let field = pagination_field_for_scan_params(params); - let project_stream = match field { + let projects = match field { PagField::Id => { let page_selector = data_page_params_nameid_id(&rqctx, &query)?; nexus.projects_list_by_id(&page_selector).await? @@ -173,11 +173,11 @@ async fn projects_get( let page_selector = data_page_params_nameid_name(&rqctx, &query)?; nexus.projects_list_by_name(&page_selector).await? } - }; - - let view_list = - to_list::(project_stream).await; - Ok(HttpResponseOk(ScanByNameOrId::results_page(&query, view_list)?)) + } + .into_iter() + .map(|p| p.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page(&query, projects)?)) } /** @@ -293,15 +293,16 @@ async fn project_disks_get( let query = query_params.into_inner(); let path = path_params.into_inner(); let project_name = &path.project_name; - let disk_stream = nexus + let disks = nexus .project_list_disks( project_name, &data_page_params_for(&rqctx, &query)?, ) - .await?; - - let disk_list = to_list::(disk_stream).await; - Ok(HttpResponseOk(ScanByName::results_page(&query, disk_list)?)) + .await? + .into_iter() + .map(|d| d.into()) + .collect(); + Ok(HttpResponseOk(ScanByName::results_page(&query, disks)?)) } /** @@ -398,15 +399,16 @@ async fn project_instances_get( let query = query_params.into_inner(); let path = path_params.into_inner(); let project_name = &path.project_name; - let instance_stream = nexus + let instances = nexus .project_list_instances( &project_name, &data_page_params_for(&rqctx, &query)?, ) - .await?; - let view_list = - to_list::(instance_stream).await; - Ok(HttpResponseOk(ScanByName::results_page(&query, view_list)?)) + .await? 
+ .into_iter() + .map(|i| i.into()) + .collect(); + Ok(HttpResponseOk(ScanByName::results_page(&query, instances)?)) } /** @@ -573,12 +575,10 @@ async fn instance_disks_get( direction: PaginationOrder::Ascending, limit: NonZeroU32::new(std::u32::MAX).unwrap(), }; - let disk_list = nexus + let disks = nexus .instance_list_disks(&project_name, &instance_name, &fake_query) .await?; - let view_list = - to_list::(disk_list).await; - Ok(HttpResponseOk(view_list)) + Ok(HttpResponseOk(disks)) } /** @@ -681,14 +681,13 @@ async fn project_vpcs_get( let query = query_params.into_inner(); let path = path_params.into_inner(); let project_name = &path.project_name; - let vpc_stream = nexus + let vpcs = nexus .project_list_vpcs( &project_name, &data_page_params_for(&rqctx, &query)?, ) .await?; - let view_list = to_list(vpc_stream).await; - Ok(HttpResponseOk(ScanByName::results_page(&query, view_list)?)) + Ok(HttpResponseOk(ScanByName::results_page(&query, vpcs)?)) } /** diff --git a/omicron-nexus/src/lib.rs b/omicron-nexus/src/lib.rs index 19e40a0a69..07082ef7fb 100644 --- a/omicron-nexus/src/lib.rs +++ b/omicron-nexus/src/lib.rs @@ -37,6 +37,8 @@ use uuid::Uuid; extern crate slog; #[macro_use] extern crate newtype_derive; +#[macro_use] +extern crate diesel; /** * Run the OpenAPI generator for the external API, which emits the OpenAPI spec diff --git a/omicron-nexus/src/nexus.rs b/omicron-nexus/src/nexus.rs index 70e0c406f2..0e674d6552 100644 --- a/omicron-nexus/src/nexus.rs +++ b/omicron-nexus/src/nexus.rs @@ -19,12 +19,12 @@ use omicron_common::api::external::DiskAttachment; use omicron_common::api::external::DiskCreateParams; use omicron_common::api::external::DiskState; use omicron_common::api::external::Error; -use omicron_common::api::external::Generation; use omicron_common::api::external::IdentityMetadata; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::InstanceCreateParams; use omicron_common::api::external::InstanceState; use omicron_common::api::external::ListResult; +use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::Name; use omicron_common::api::external::ProjectCreateParams; @@ -298,9 +298,9 @@ impl Nexus { new_project: &ProjectCreateParams, ) -> CreateResult { // Create a project. - let db_project = db::model::Project::new(new_project); - let project: db::model::Project = - self.db_datastore.project_create(&db_project).await?; + let db_project = db::model::Project::new(new_project.clone()); + let db_project = self.db_datastore.project_create(db_project).await?; + // TODO: We probably want to have "project creation" and "default VPC // creation" co-located within a saga for atomicity. 
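The `.into()` calls in the handlers above lean on conversions from the Diesel-backed model types to the external API views. A rough sketch of the shape of such a conversion; the real implementations live in `omicron-nexus/src/db/model.rs` and the field layout and helper method here are assumptions:

```rust
// Illustrative only: assumes the db model exposes its identity metadata via
// an `identity()` helper and that the external view carries only that data.
impl From<db::model::Project> for omicron_common::api::external::Project {
    fn from(project: db::model::Project) -> Self {
        Self { identity: project.identity() }
    }
}
```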
// @@ -312,7 +312,7 @@ impl Nexus { .db_datastore .project_create_vpc( &id, - project.id(), + db_project.id(), &VpcCreateParams { identity: IdentityMetadataCreateParams { name: Name::try_from("default").unwrap(), @@ -325,7 +325,7 @@ impl Nexus { ) .await?; - Ok(project) + Ok(db_project) } pub async fn project_fetch( @@ -338,14 +338,14 @@ impl Nexus { pub async fn projects_list_by_name( &self, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { + ) -> ListResultVec { self.db_datastore.projects_list_by_name(pagparams).await } pub async fn projects_list_by_id( &self, pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResult { + ) -> ListResultVec { self.db_datastore.projects_list_by_id(pagparams).await } @@ -358,7 +358,7 @@ impl Nexus { name: &Name, new_params: &ProjectUpdateParams, ) -> UpdateResult { - self.db_datastore.project_update(name, new_params).await + self.db_datastore.project_update(name, &new_params).await } /* @@ -369,7 +369,7 @@ impl Nexus { &self, project_name: &Name, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { + ) -> ListResultVec { let project_id = self.db_datastore.project_lookup_id_by_name(project_name).await?; self.db_datastore.project_list_disks(&project_id, pagparams).await @@ -398,13 +398,9 @@ impl Nexus { .db_datastore .project_create_disk( &disk_id, - &project.id(), + project.id(), params, - &db::model::DiskRuntimeState { - disk_state: db::model::DiskState::new(DiskState::Creating), - gen: Generation::new(), - time_updated: Utc::now(), - }, + &db::model::DiskRuntimeState::new(), ) .await?; @@ -420,14 +416,7 @@ impl Nexus { * to "Created". */ self.db_datastore - .disk_update_runtime( - &disk_id, - &db::model::DiskRuntimeState { - disk_state: db::model::DiskState::new(DiskState::Detached), - gen: disk_created.runtime.gen.next(), - time_updated: Utc::now(), - }, - ) + .disk_update_runtime(&disk_id, &disk_created.runtime().detach()) .await?; Ok(disk_created) @@ -449,9 +438,10 @@ impl Nexus { disk_name: &Name, ) -> DeleteResult { let disk = self.project_lookup_disk(project_name, disk_name).await?; - bail_unless!(disk.runtime.disk_state.state() != &DiskState::Destroyed); + let runtime = disk.runtime(); + bail_unless!(runtime.state().state() != &DiskState::Destroyed); - if disk.runtime.disk_state.is_attached() { + if runtime.state().is_attached() { return Err(Error::InvalidRequest { message: String::from("disk is attached"), }); @@ -475,7 +465,7 @@ impl Nexus { * before actually beginning the attach process. Sagas can maybe * address that. 
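The new `DiskRuntimeState::new()` and `detach()` helpers used above replace the hand-built runtime structs from the removed code. Their definitions are not part of this hunk; a hedged sketch of what they plausibly look like, with field names and types inferred from the removed code rather than copied from the patch:

```rust
use chrono::Utc;

impl DiskRuntimeState {
    /// Initial runtime state for a freshly created disk (assumed fields).
    pub fn new() -> Self {
        Self {
            disk_state: DiskState::Creating,
            attach_instance_id: None,
            gen: Generation::new(),
            time_updated: Utc::now(),
        }
    }

    /// Runtime state after detaching: bump the generation and clear any
    /// attachment (assumed fields).
    pub fn detach(self) -> Self {
        Self {
            disk_state: DiskState::Detached,
            attach_instance_id: None,
            gen: self.gen.next(),
            time_updated: Utc::now(),
        }
    }
}
```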
*/ - self.db_datastore.project_delete_disk(&disk.identity.id).await + self.db_datastore.project_delete_disk(&disk.id).await } /* @@ -503,7 +493,7 @@ impl Nexus { &self, project_name: &Name, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { + ) -> ListResultVec { let project_id = self.db_datastore.project_lookup_id_by_name(project_name).await?; self.db_datastore.project_list_instances(&project_id, pagparams).await @@ -600,7 +590,7 @@ impl Nexus { .db_datastore .instance_fetch_by_name(&project_id, instance_name) .await?; - self.db_datastore.project_delete_instance(&instance.identity.id).await + self.db_datastore.project_delete_instance(&instance.id).await } pub async fn project_lookup_instance( @@ -670,8 +660,8 @@ impl Nexus { &self, instance: &db::model::Instance, ) -> Result, Error> { - let said = &instance.runtime.sled_uuid; - self.sled_client(&said).await + let sa_id = &instance.active_server_id; + self.sled_client(&sa_id).await } /** @@ -698,7 +688,7 @@ impl Nexus { let instance = self.project_lookup_instance(project_name, instance_name).await?; - self.check_runtime_change_allowed(&instance.runtime.clone().into())?; + self.check_runtime_change_allowed(&instance.runtime().into())?; self.instance_set_runtime( &instance, self.instance_sled(&instance).await?, @@ -707,7 +697,7 @@ impl Nexus { }, ) .await?; - self.db_datastore.instance_fetch(&instance.identity.id).await + self.db_datastore.instance_fetch(&instance.id).await } /** @@ -721,7 +711,7 @@ impl Nexus { let instance = self.project_lookup_instance(project_name, instance_name).await?; - self.check_runtime_change_allowed(&instance.runtime.clone().into())?; + self.check_runtime_change_allowed(&instance.runtime().into())?; self.instance_set_runtime( &instance, self.instance_sled(&instance).await?, @@ -730,7 +720,7 @@ impl Nexus { }, ) .await?; - self.db_datastore.instance_fetch(&instance.identity.id).await + self.db_datastore.instance_fetch(&instance.id).await } /** @@ -744,7 +734,7 @@ impl Nexus { let instance = self.project_lookup_instance(project_name, instance_name).await?; - self.check_runtime_change_allowed(&instance.runtime.clone().into())?; + self.check_runtime_change_allowed(&instance.runtime().into())?; self.instance_set_runtime( &instance, self.instance_sled(&instance).await?, @@ -753,7 +743,7 @@ impl Nexus { }, ) .await?; - self.db_datastore.instance_fetch(&instance.identity.id).await + self.db_datastore.instance_fetch(&instance.id).await } /** @@ -773,15 +763,11 @@ impl Nexus { * beat us to it. 
*/ let new_runtime = sa - .instance_ensure( - instance.identity.id, - instance.runtime.clone().into(), - requested, - ) + .instance_ensure(instance.id, instance.runtime().into(), requested) .await?; self.db_datastore - .instance_update_runtime(&instance.identity.id, &new_runtime.into()) + .instance_update_runtime(&instance.id, &new_runtime.into()) .await .map(|_| ()) } @@ -794,12 +780,10 @@ impl Nexus { project_name: &Name, instance_name: &Name, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { + ) -> ListResultVec { let instance = self.project_lookup_instance(project_name, instance_name).await?; - self.db_datastore - .instance_list_disks(&instance.identity.id, pagparams) - .await + self.db_datastore.instance_list_disks(&instance.id, pagparams).await } /** @@ -814,15 +798,13 @@ impl Nexus { let instance = self.project_lookup_instance(project_name, instance_name).await?; let disk = self.project_lookup_disk(project_name, disk_name).await?; - if let Some(instance_id) = - disk.runtime.disk_state.attached_instance_id() - { - if instance_id == &instance.identity.id { + if let Some(instance_id) = disk.attach_instance_id { + if instance_id == instance.id { return Ok(DiskAttachment { - instance_id: instance.identity.id, - disk_name: disk.identity.name.clone(), - disk_id: disk.identity.id, - disk_state: disk.runtime.disk_state.clone().into(), + instance_id: instance.id, + disk_name: disk.name.clone(), + disk_id: disk.id, + disk_state: disk.state().into(), }); } } @@ -849,29 +831,25 @@ impl Nexus { let instance = self.project_lookup_instance(project_name, instance_name).await?; let disk = self.project_lookup_disk(project_name, disk_name).await?; - let instance_id = &instance.identity.id; + let instance_id = &instance.id; fn disk_attachment_for( instance: &db::model::Instance, disk: &db::model::Disk, ) -> CreateResult { - let instance_id = &instance.identity.id; - assert_eq!( - instance_id, - disk.runtime.disk_state.attached_instance_id().unwrap() - ); + assert_eq!(instance.id, disk.attach_instance_id.unwrap()); Ok(DiskAttachment { - instance_id: *instance_id, - disk_id: disk.identity.id, - disk_name: disk.identity.name.clone(), - disk_state: disk.runtime.disk_state.clone().into(), + instance_id: instance.id, + disk_id: disk.id, + disk_name: disk.name.clone(), + disk_state: disk.runtime().state().into(), }) } fn disk_attachment_error( disk: &db::model::Disk, ) -> CreateResult { - let disk_status = match disk.runtime.disk_state.clone().into() { + let disk_status = match disk.runtime().state().into() { DiskState::Destroyed => "disk is destroyed", DiskState::Faulted => "disk is faulted", DiskState::Creating => "disk is detached", @@ -895,13 +873,13 @@ impl Nexus { }; let message = format!( "cannot attach disk \"{}\": {}", - disk.identity.name.as_str(), + disk.name.as_str(), disk_status ); Err(Error::InvalidRequest { message }) } - match &disk.runtime.disk_state.clone().into() { + match &disk.state().into() { /* * If we're already attaching or attached to the requested instance, * there's nothing else to do. 
@@ -949,7 +927,7 @@ impl Nexus { DiskStateRequested::Attached(*instance_id), ) .await?; - let disk = self.db_datastore.disk_fetch(&disk.identity.id).await?; + let disk = self.db_datastore.disk_fetch(&disk.id).await?; disk_attachment_for(&instance, &disk) } @@ -965,9 +943,9 @@ impl Nexus { let instance = self.project_lookup_instance(project_name, instance_name).await?; let disk = self.project_lookup_disk(project_name, disk_name).await?; - let instance_id = &instance.identity.id; + let instance_id = &instance.id; - match &disk.runtime.disk_state.clone().into() { + match &disk.state().into() { /* * This operation is a noop if the disk is not attached or already * detaching from the same instance. @@ -1026,15 +1004,10 @@ impl Nexus { * Ask the SA to begin the state change. Then update the database to * reflect the new intermediate state. */ - let new_runtime = sa - .disk_ensure( - disk.identity.id, - disk.runtime.clone().into(), - requested, - ) - .await?; + let new_runtime = + sa.disk_ensure(disk.id, disk.runtime().into(), requested).await?; self.db_datastore - .disk_update_runtime(&disk.identity.id, &new_runtime.into()) + .disk_update_runtime(&disk.id, &new_runtime.into()) .await .map(|_| ()) } @@ -1043,15 +1016,17 @@ impl Nexus { &self, project_name: &Name, pagparams: &DataPageParams<'_, Name>, - ) -> ListResult { + ) -> ListResultVec { let project_id = self.db_datastore.project_lookup_id_by_name(project_name).await?; - let db_stream = - self.db_datastore.project_list_vpcs(&project_id, pagparams).await?; - let api_stream = Box::pin( - db_stream.map(|result| result.map(|db_vpc| db_vpc.into())), - ); - Ok(api_stream) + let vpcs = self + .db_datastore + .project_list_vpcs(&project_id, pagparams) + .await? + .into_iter() + .map(|vpc| vpc.into()) + .collect::>(); + Ok(vpcs) } pub async fn project_create_vpc( @@ -1093,10 +1068,7 @@ impl Nexus { self.db_datastore.project_lookup_id_by_name(project_name).await?; let vpc = self.db_datastore.vpc_fetch_by_name(&project_id, vpc_name).await?; - Ok(self - .db_datastore - .project_update_vpc(&vpc.identity.id, params) - .await?) + Ok(self.db_datastore.project_update_vpc(&vpc.id, params).await?) 
} pub async fn project_delete_vpc( @@ -1165,6 +1137,7 @@ impl Nexus { description: String::from(""), time_created: Utc::now(), time_modified: Utc::now(), + time_deleted: None, }, service_address: sa.service_address, }) @@ -1191,6 +1164,7 @@ impl Nexus { description: String::from(""), time_created: Utc::now(), time_modified: Utc::now(), + time_deleted: None, }, service_address: sa.service_address, }) @@ -1415,9 +1389,8 @@ impl TestInterfaces for Nexus { id: &Uuid, ) -> Result, Error> { let disk = self.db_datastore.disk_fetch(id).await?; - let instance_id = - disk.runtime.disk_state.attached_instance_id().unwrap(); - let instance = self.db_datastore.instance_fetch(instance_id).await?; + let instance_id = disk.runtime().attach_instance_id.unwrap(); + let instance = self.db_datastore.instance_fetch(&instance_id).await?; self.instance_sled(&instance).await } } diff --git a/omicron-nexus/src/sagas.rs b/omicron-nexus/src/sagas.rs index 6d8dda8ea5..a7ab297daa 100644 --- a/omicron-nexus/src/sagas.rs +++ b/omicron-nexus/src/sagas.rs @@ -148,7 +148,7 @@ async fn sic_create_instance_record( ) .await .map_err(ActionError::action_failed)?; - Ok(instance.runtime.into()) + Ok(instance.runtime().into()) } async fn sic_instance_ensure( diff --git a/omicron-nexus/tests/test_projects.rs b/omicron-nexus/tests/test_projects.rs new file mode 100644 index 0000000000..fc3626e7c6 --- /dev/null +++ b/omicron-nexus/tests/test_projects.rs @@ -0,0 +1,61 @@ +use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::Name; +use omicron_common::api::external::Project; +use omicron_common::api::external::ProjectCreateParams; +use std::convert::TryFrom; + +use dropshot::test_util::object_get; +use dropshot::test_util::objects_list_page; +use dropshot::test_util::objects_post; +use dropshot::test_util::ClientTestContext; + +pub mod common; +use common::test_setup; + +extern crate slog; + +#[tokio::test] +async fn test_projects() { + let cptestctx = test_setup("test_projects").await; + let client = &cptestctx.external_client; + + /* Create a project that we'll use for testing. */ + let p1_name = "springfield-squidport"; + let p2_name = "cairo-airport"; + create_project(&client, &p1_name).await; + create_project(&client, &p2_name).await; + + let p1_url = format!("/projects/{}", p1_name); + let project: Project = object_get(&client, &p1_url).await; + assert_eq!(project.identity.name, p1_name); + + let p2_url = format!("/projects/{}", p2_name); + let project: Project = object_get(&client, &p2_url).await; + assert_eq!(project.identity.name, p2_name); + + let projects = + objects_list_page::(client, "/projects").await.items; + assert_eq!(projects.len(), 2); + // alphabetical order for now + assert_eq!(projects[0].identity.name, p2_name); + assert_eq!(projects[1].identity.name, p1_name); + + cptestctx.teardown().await; +} + +async fn create_project( + client: &ClientTestContext, + project_name: &str, +) -> Project { + objects_post( + &client, + "/projects", + ProjectCreateParams { + identity: IdentityMetadataCreateParams { + name: Name::try_from(project_name).unwrap(), + description: "a pier".to_string(), + }, + }, + ) + .await +}
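As a follow-on to the listing assertions above, dropshot's standard `limit` pagination parameter should make it possible to page through the same collection one item at a time. A hedged sketch of an extra check that could be appended to the test body (not part of this patch):

```rust
// With a page size of one, the first page should contain only the
// alphabetically first project, matching the ordering asserted above.
let first_page =
    objects_list_page::<Project>(client, "/projects?limit=1").await.items;
assert_eq!(first_page.len(), 1);
assert_eq!(first_page[0].identity.name, p2_name);
```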