diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs
index 767d8f53d5..9043592414 100644
--- a/clients/nexus-client/src/lib.rs
+++ b/clients/nexus-client/src/lib.rs
@@ -38,6 +38,7 @@ progenitor::generate_api!(
         NewPasswordHash = omicron_passwords::NewPasswordHash,
         TypedUuidForCollectionKind = omicron_uuid_kinds::CollectionUuid,
         TypedUuidForDownstairsKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::DownstairsKind>,
+        TypedUuidForPropolisKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::PropolisKind>,
         TypedUuidForSledKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::SledKind>,
         TypedUuidForUpstairsKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::UpstairsKind>,
         TypedUuidForUpstairsRepairKind = omicron_uuid_kinds::TypedUuid<omicron_uuid_kinds::UpstairsRepairKind>,
diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs
index aa42429089..e4a407ef3d 100644
--- a/clients/sled-agent-client/src/lib.rs
+++ b/clients/sled-agent-client/src/lib.rs
@@ -53,6 +53,8 @@ progenitor::generate_api!(
         PortSpeed = omicron_common::api::internal::shared::PortSpeed,
         SourceNatConfig = omicron_common::api::internal::shared::SourceNatConfig,
         SwitchLocation = omicron_common::api::external::SwitchLocation,
+        TypedUuidForInstanceKind = omicron_uuid_kinds::InstanceUuid,
+        TypedUuidForPropolisKind = omicron_uuid_kinds::PropolisUuid,
         TypedUuidForZpoolKind = omicron_uuid_kinds::ZpoolUuid,
         Vni = omicron_common::api::external::Vni,
         ZpoolKind = omicron_common::zpool_name::ZpoolKind,
diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs
index 4f990c56e1..b4a73f4168 100644
--- a/common/src/api/internal/nexus.rs
+++ b/common/src/api/internal/nexus.rs
@@ -10,6 +10,7 @@ use crate::api::external::{
 };
 use chrono::{DateTime, Utc};
 use omicron_uuid_kinds::DownstairsRegionKind;
+use omicron_uuid_kinds::PropolisUuid;
 use omicron_uuid_kinds::TypedUuid;
 use omicron_uuid_kinds::UpstairsRepairKind;
 use omicron_uuid_kinds::UpstairsSessionKind;
@@ -50,9 +51,9 @@ pub struct InstanceProperties {
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
 pub struct InstanceRuntimeState {
     /// The instance's currently active VMM ID.
-    pub propolis_id: Option<Uuid>,
+    pub propolis_id: Option<PropolisUuid>,
     /// If a migration is active, the ID of the target VMM.
-    pub dst_propolis_id: Option<Uuid>,
+    pub dst_propolis_id: Option<PropolisUuid>,
     /// If a migration is active, the ID of that migration.
     pub migration_id: Option<Uuid>,
     /// Generation number for this state.
@@ -105,7 +106,7 @@ pub struct SledInstanceState {
     pub instance_state: InstanceRuntimeState,
 
     /// The ID of the VMM whose state is being reported.
-    pub propolis_id: Uuid,
+    pub propolis_id: PropolisUuid,
 
     /// The most recent state of the sled's VMM process.
     pub vmm_state: VmmRuntimeState,
diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs
index be4e1e8696..7df457b247 100644
--- a/dev-tools/omdb/src/bin/omdb/db.rs
+++ b/dev-tools/omdb/src/bin/omdb/db.rs
@@ -111,6 +111,7 @@ use omicron_common::api::external::InstanceState;
 use omicron_common::api::external::MacAddr;
 use omicron_uuid_kinds::CollectionUuid;
 use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::PropolisUuid;
 use omicron_uuid_kinds::SledUuid;
 use sled_agent_client::types::VolumeConstructionRequest;
 use std::borrow::Cow;
@@ -129,12 +130,18 @@ use uuid::Uuid;
 const NO_ACTIVE_PROPOLIS_MSG: &str = "<no active Propolis>";
 const NOT_ON_SLED_MSG: &str = "<not on any sled>";
 
-struct MaybePropolisId(Option<Uuid>);
-struct MaybeSledId(Option<Uuid>);
+struct MaybePropolisId(Option<PropolisUuid>);
+struct MaybeSledId(Option<SledUuid>);
 
 impl From<&InstanceAndActiveVmm> for MaybePropolisId {
     fn from(value: &InstanceAndActiveVmm) -> Self {
-        Self(value.instance().runtime().propolis_id)
+        Self(
+            value
+                .instance()
+                .runtime()
+                .propolis_id
+                .map(PropolisUuid::from_untyped_uuid),
+        )
     }
 }
 
@@ -1040,7 +1047,7 @@ async fn cmd_db_disk_info(
             let my_sled_id = instance.sled_id().unwrap();
 
             let (_, my_sled) = LookupPath::new(opctx, datastore)
-                .sled_id(my_sled_id)
+                .sled_id(my_sled_id.into_untyped_uuid())
                 .fetch()
                 .await
                 .context("failed to look up sled")?;
@@ -1948,7 +1955,7 @@ async fn cmd_db_instances(
     check_limit(&instances, limit, ctx);
 
     let mut rows = Vec::new();
-    let mut h_to_s: HashMap<Uuid, String> = HashMap::new();
+    let mut h_to_s: HashMap<SledUuid, String> = HashMap::new();
 
     for i in instances {
         let host_serial = if i.vmm().is_some() {
@@ -1956,7 +1963,7 @@ async fn cmd_db_instances(
                 h_to_s.entry(i.sled_id().unwrap())
             {
                 let (_, my_sled) = LookupPath::new(opctx, datastore)
-                    .sled_id(i.sled_id().unwrap())
+                    .sled_id(i.sled_id().unwrap().into_untyped_uuid())
                     .fetch()
                     .await
                     .context("failed to look up sled")?;
diff --git a/nexus/db-model/src/instance.rs b/nexus/db-model/src/instance.rs
index 14f79aea39..dadf4e89ae 100644
--- a/nexus/db-model/src/instance.rs
+++ b/nexus/db-model/src/instance.rs
@@ -10,6 +10,7 @@ use crate::schema::{disk, external_ip, instance};
 use chrono::{DateTime, Utc};
 use db_macros::Resource;
 use nexus_types::external_api::params;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid};
 use serde::Deserialize;
 use serde::Serialize;
 use uuid::Uuid;
@@ -82,12 +83,14 @@ impl Instance {
     /// Constructs a new instance record with no VMM that will initially appear
     /// to be in the Creating state.
     pub fn new(
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         project_id: Uuid,
         params: &params::InstanceCreate,
     ) -> Self {
-        let identity =
-            InstanceIdentity::new(instance_id, params.identity.clone());
+        let identity = InstanceIdentity::new(
+            instance_id.into_untyped_uuid(),
+            params.identity.clone(),
+        );
 
         let runtime_state = InstanceRuntimeState::new(
             InstanceState::Creating,
@@ -229,8 +232,10 @@ impl From<InstanceRuntimeState>
             nexus_state,
             time_updated: state.time_updated,
             gen: state.gen.into(),
-            propolis_id: state.propolis_id,
-            dst_propolis_id: state.dst_propolis_id,
+            propolis_id: state.propolis_id.map(|id| id.into_untyped_uuid()),
+            dst_propolis_id: state
+                .dst_propolis_id
+                .map(|id| id.into_untyped_uuid()),
             migration_id: state.migration_id,
         }
     }
@@ -241,10 +246,12 @@ impl From<omicron_common::api::internal::nexus::InstanceRuntimeState>
 {
     fn from(state: InstanceRuntimeState) -> Self {
         Self {
-            dst_propolis_id: state.dst_propolis_id,
+            dst_propolis_id: state
+                .dst_propolis_id
+                .map(PropolisUuid::from_untyped_uuid),
             gen: state.gen.into(),
             migration_id: state.migration_id,
-            propolis_id: state.propolis_id,
+            propolis_id: state.propolis_id.map(PropolisUuid::from_untyped_uuid),
             time_updated: state.time_updated,
         }
     }
diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs
index 95bb8bb7f2..6d347ecd37 100644
--- a/nexus/db-model/src/network_interface.rs
+++ b/nexus/db-model/src/network_interface.rs
@@ -18,6 +18,7 @@ use nexus_types::external_api::params;
 use nexus_types::identity::Resource;
 use omicron_common::api::{external, internal};
 use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use omicron_uuid_kinds::OmicronZoneUuid;
 use omicron_uuid_kinds::VnicUuid;
 use sled_agent_client::ZoneKind;
@@ -391,7 +392,7 @@ impl IncompleteNetworkInterface {
     pub fn new_instance(
         interface_id: Uuid,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         subnet: VpcSubnet,
         identity: external::IdentityMetadataCreateParams,
         ip: Option<IpAddr>,
@@ -399,7 +400,7 @@ impl IncompleteNetworkInterface {
         Self::new(
             interface_id,
             NetworkInterfaceKind::Instance,
-            instance_id,
+            instance_id.into_untyped_uuid(),
             subnet,
             identity,
             ip,
diff --git a/nexus/db-model/src/vmm.rs b/nexus/db-model/src/vmm.rs
index ceaef2e709..4cc6235e1d 100644
--- a/nexus/db-model/src/vmm.rs
+++ b/nexus/db-model/src/vmm.rs
@@ -16,6 +16,7 @@ use super::{Generation, VmmState};
 use crate::schema::vmm;
 use crate::SqlU16;
 use chrono::{DateTime, Utc};
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, SledUuid};
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;
@@ -68,9 +69,9 @@ pub enum VmmInitialState {
 impl Vmm {
     /// Creates a new VMM record.
     pub fn new(
-        id: Uuid,
-        instance_id: Uuid,
-        sled_id: Uuid,
+        id: PropolisUuid,
+        instance_id: InstanceUuid,
+        sled_id: SledUuid,
         propolis_ip: ipnetwork::IpNetwork,
         propolis_port: u16,
         initial_state: VmmInitialState,
@@ -82,11 +83,11 @@ impl Vmm {
         };
 
         Self {
-            id,
+            id: id.into_untyped_uuid(),
             time_created: now,
             time_deleted: None,
-            instance_id,
-            sled_id,
+            instance_id: instance_id.into_untyped_uuid(),
+            sled_id: sled_id.into_untyped_uuid(),
             propolis_ip,
             propolis_port: SqlU16(propolis_port),
             runtime: VmmRuntimeState {
diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs
index 8dda8041fa..0614cd0d9f 100644
--- a/nexus/db-queries/src/db/datastore/external_ip.rs
+++ b/nexus/db-queries/src/db/datastore/external_ip.rs
@@ -51,6 +51,8 @@ use omicron_common::api::external::ListResultVec;
 use omicron_common::api::external::LookupResult;
 use omicron_common::api::external::ResourceType;
 use omicron_common::api::external::UpdateResult;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use omicron_uuid_kinds::OmicronZoneUuid;
 use ref_cast::RefCast;
 use sled_agent_client::ZoneKind;
@@ -65,12 +67,12 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         ip_id: Uuid,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         pool_id: Uuid,
     ) -> CreateResult<ExternalIp> {
         let data = IncompleteExternalIp::for_instance_source_nat(
             ip_id,
-            instance_id,
+            instance_id.into_untyped_uuid(),
             pool_id,
         );
         self.allocate_external_ip(opctx, data).await
@@ -109,7 +111,7 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         ip_id: Uuid,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         pool: Option<authz::IpPool>,
         creating_instance: bool,
     ) -> CreateResult<(ExternalIp, bool)> {
@@ -385,7 +387,7 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         ip_id: Uuid,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         kind: IpKind,
         creating_instance: bool,
     ) -> Result<Option<ExternalIp>, Error> {
@@ -403,7 +405,7 @@ impl DataStore {
         };
 
         let query = Instance::attach_resource(
-            instance_id,
+            instance_id.into_untyped_uuid(),
             ip_id,
             inst_table
                 .into_boxed()
@@ -416,7 +418,7 @@ impl DataStore {
                 .filter(dsl::parent_id.is_null()),
             MAX_EXTERNAL_IPS_PLUS_SNAT,
             diesel::update(dsl::external_ip).set((
-                dsl::parent_id.eq(Some(instance_id)),
+                dsl::parent_id.eq(Some(instance_id.into_untyped_uuid())),
                 dsl::time_modified.eq(Utc::now()),
                 dsl::state.eq(IpAttachState::Attaching),
             )),
@@ -430,7 +432,7 @@ impl DataStore {
                 AttachError::CollectionNotFound => {
                     Err(Error::not_found_by_id(
                         ResourceType::Instance,
-                        &instance_id,
+                        &instance_id.into_untyped_uuid(),
                     ))
                 },
                 AttachError::ResourceNotFound => {
@@ -446,9 +448,9 @@ impl DataStore {
                 AttachError::NoUpdate { attached_count, resource, collection } => {
                     match resource.state {
                         // Idempotent errors: is in progress or complete for same resource pair -- this is fine.
-                        IpAttachState::Attaching if resource.parent_id == Some(instance_id) =>
+                        IpAttachState::Attaching if resource.parent_id == Some(instance_id.into_untyped_uuid()) =>
                             return Ok(Some(resource)),
-                        IpAttachState::Attached if resource.parent_id == Some(instance_id) => {
+                        IpAttachState::Attached if resource.parent_id == Some(instance_id.into_untyped_uuid()) => {
                             do_saga = false;
                             return Ok(Some(resource))
                         },
@@ -518,7 +520,7 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         ip_id: Uuid,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         kind: IpKind,
         creating_instance: bool,
     ) -> UpdateResult<Option<ExternalIp>> {
@@ -534,7 +536,7 @@ impl DataStore {
         };
 
         let query = Instance::detach_resource(
-            instance_id,
+            instance_id.into_untyped_uuid(),
             ip_id,
             inst_table
                 .into_boxed()
@@ -558,7 +560,7 @@ impl DataStore {
                 DetachError::CollectionNotFound => {
                     Error::not_found_by_id(
                         ResourceType::Instance,
-                        &instance_id,
+                        &instance_id.into_untyped_uuid(),
                     )
                 },
                 DetachError::ResourceNotFound => {
@@ -572,7 +574,7 @@ impl DataStore {
                     }
                 },
                 DetachError::NoUpdate { resource, collection } => {
-                    let parent_match = resource.parent_id == Some(instance_id);
+                    let parent_match = resource.parent_id == Some(instance_id.into_untyped_uuid());
                     match resource.state {
                         // Idempotent cases: already detached OR detaching from same instance.
                         IpAttachState::Detached => {
@@ -660,10 +662,10 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         ip_id: Uuid,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> Result<Option<ExternalIp>, Error> {
         let _ = LookupPath::new(&opctx, self)
-            .instance_id(instance_id)
+            .instance_id(instance_id.into_untyped_uuid())
             .lookup_for(authz::Action::Modify)
             .await?;
 
@@ -763,13 +765,13 @@ impl DataStore {
     pub async fn instance_lookup_external_ips(
         &self,
         opctx: &OpContext,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> LookupResult<Vec<ExternalIp>> {
         use db::schema::external_ip::dsl;
         dsl::external_ip
             .filter(dsl::is_service.eq(false))
             .filter(dsl::is_probe.eq(false))
-            .filter(dsl::parent_id.eq(instance_id))
+            .filter(dsl::parent_id.eq(instance_id.into_untyped_uuid()))
             .filter(dsl::time_deleted.is_null())
             .select(ExternalIp::as_select())
             .get_results_async(&*self.pool_connection_authorized(opctx).await?)
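
(Aside, not part of the patch: the conversion pattern these external-IP hunks repeat. Datastore entry points now take `InstanceUuid`, a `TypedUuid` newtype over `Uuid`, and drop back to the untyped form only at the Diesel boundary, because the underlying columns are still plain UUIDs. A minimal sketch, assuming only the `GenericUuid` trait methods already used throughout this diff:

    use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
    use uuid::Uuid;

    fn round_trip() {
        // A typed ID is built from, and collapses back to, a plain Uuid
        // without loss; the kind exists only at the type level.
        let raw: Uuid = Uuid::new_v4();
        let typed = InstanceUuid::from_untyped_uuid(raw);
        assert_eq!(typed.into_untyped_uuid(), raw);
    }

The kind parameter is effectively phantom data, so the newtype should cost nothing at runtime.)
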
@@ -782,7 +784,7 @@ impl DataStore {
     pub async fn instance_lookup_ephemeral_ip(
         &self,
         opctx: &OpContext,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> LookupResult<Option<ExternalIp>> {
         Ok(self
             .instance_lookup_external_ips(opctx, instance_id)
@@ -915,11 +917,11 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         authz_fip: &authz::FloatingIp,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         creating_instance: bool,
     ) -> UpdateResult<(ExternalIp, bool)> {
         let (.., authz_instance) = LookupPath::new(&opctx, self)
-            .instance_id(instance_id)
+            .instance_id(instance_id.into_untyped_uuid())
             .lookup_for(authz::Action::Modify)
             .await?;
 
@@ -957,11 +959,11 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         authz_fip: &authz::FloatingIp,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         creating_instance: bool,
     ) -> UpdateResult<(ExternalIp, bool)> {
         let (.., authz_instance) = LookupPath::new(&opctx, self)
-            .instance_id(instance_id)
+            .instance_id(instance_id.into_untyped_uuid())
             .lookup_for(authz::Action::Modify)
             .await?;
diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs
index 6542b03fd1..f4b31b24ff 100644
--- a/nexus/db-queries/src/db/datastore/instance.rs
+++ b/nexus/db-queries/src/db/datastore/instance.rs
@@ -49,6 +49,10 @@ use omicron_common::api::external::LookupType;
 use omicron_common::api::external::ResourceType;
 use omicron_common::api::internal::nexus::MigrationRuntimeState;
 use omicron_common::bail_unless;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
+use omicron_uuid_kinds::PropolisUuid;
+use omicron_uuid_kinds::SledUuid;
 use ref_cast::RefCast;
 use uuid::Uuid;
 
@@ -68,8 +72,8 @@ impl InstanceAndActiveVmm {
         &self.vmm
     }
 
-    pub fn sled_id(&self) -> Option<Uuid> {
-        self.vmm.as_ref().map(|v| v.sled_id)
+    pub fn sled_id(&self) -> Option<SledUuid> {
+        self.vmm.as_ref().map(|v| SledUuid::from_untyped_uuid(v.sled_id))
     }
 
     pub fn effective_state(
@@ -446,21 +450,21 @@ impl DataStore {
     //   to explicitly fetch the state if they want that.
     pub async fn instance_update_runtime(
         &self,
-        instance_id: &Uuid,
+        instance_id: &InstanceUuid,
         new_runtime: &InstanceRuntimeState,
     ) -> Result<bool, Error> {
         use db::schema::instance::dsl;
 
         let updated = diesel::update(dsl::instance)
             .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(*instance_id))
+            .filter(dsl::id.eq(instance_id.into_untyped_uuid()))
             // Runtime state updates are allowed if either:
             // - the active Propolis ID will not change, the state generation
             //   increased, and the Propolis generation will not change, or
             // - the Propolis generation increased.
             .filter(dsl::state_generation.lt(new_runtime.gen))
             .set(new_runtime.clone())
-            .check_if_exists::<Instance>(*instance_id)
+            .check_if_exists::<Instance>(instance_id.into_untyped_uuid())
             .execute_and_check(&*self.pool_connection_unauthorized().await?)
             .await
             .map(|r| match r.status {
@@ -472,7 +476,7 @@ impl DataStore {
                     e,
                     ErrorHandler::NotFoundByLookup(
                         ResourceType::Instance,
-                        LookupType::ById(*instance_id),
+                        LookupType::ById(instance_id.into_untyped_uuid()),
                     ),
                 )
             })?;
@@ -507,9 +511,9 @@ impl DataStore {
     /// - `Err` if another error occurred while accessing the database.
     pub async fn instance_and_vmm_update_runtime(
         &self,
-        instance_id: &Uuid,
+        instance_id: &InstanceUuid,
         new_instance: &InstanceRuntimeState,
-        vmm_id: &Uuid,
+        vmm_id: &PropolisUuid,
         new_vmm: &VmmRuntimeState,
         migration: &Option<MigrationRuntimeState>,
     ) -> Result<InstanceAndVmmUpdateResult, Error> {
@@ -694,7 +698,11 @@ impl DataStore {
             }
         })?;
 
-        self.instance_ssh_keys_delete(opctx, authz_instance.id()).await?;
+        self.instance_ssh_keys_delete(
+            opctx,
+            InstanceUuid::from_untyped_uuid(authz_instance.id()),
+        )
+        .await?;
 
         Ok(())
     }
@@ -943,7 +951,7 @@ mod tests {
     ) -> authz::Instance {
         let silo_id = *nexus_db_fixed_data::silo::DEFAULT_SILO_ID;
         let project_id = Uuid::new_v4();
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
 
         let (authz_project, _project) = datastore
             .project_create(
@@ -990,7 +998,7 @@ mod tests {
             .expect("instance must be created successfully");
 
         let (.., authz_instance) = LookupPath::new(&opctx, &datastore)
-            .instance_id(instance_id)
+            .instance_id(instance_id.into_untyped_uuid())
             .lookup_for(authz::Action::Modify)
             .await
             .expect("instance must exist");
@@ -1270,9 +1278,11 @@ mod tests {
             )
             .await
            .expect("active VMM should be inserted successfully!");
+
+        let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id());
         datastore
             .instance_update_runtime(
-                &authz_instance.id(),
+                &instance_id,
                 &InstanceRuntimeState {
                     time_updated: Utc::now(),
                     gen: Generation(
@@ -1339,7 +1349,7 @@ mod tests {
             .expect("migration should be inserted successfully!");
         datastore
             .instance_update_runtime(
-                &authz_instance.id(),
+                &instance_id,
                 &InstanceRuntimeState {
                     time_updated: Utc::now(),
                     gen: Generation(
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs
index 0a48f33ebd..ca7c76c0ae 100644
--- a/nexus/db-queries/src/db/datastore/mod.rs
+++ b/nexus/db-queries/src/db/datastore/mod.rs
@@ -41,11 +41,11 @@ use omicron_common::api::external::SemverVersion;
 use omicron_common::backoff::{
     retry_notify, retry_policy_internal_service, BackoffError,
 };
+use omicron_uuid_kinds::{GenericUuid, SledUuid};
 use slog::Logger;
 use std::net::Ipv6Addr;
 use std::num::NonZeroU32;
 use std::sync::Arc;
-use uuid::Uuid;
 
 mod address_lot;
 mod allow_list;
@@ -307,11 +307,13 @@ impl DataStore {
     pub async fn next_ipv6_address(
         &self,
         opctx: &OpContext,
-        sled_id: Uuid,
+        sled_id: SledUuid,
     ) -> Result<Ipv6Addr, Error> {
         use db::schema::sled::dsl;
         let net = diesel::update(
-            dsl::sled.find(sled_id).filter(dsl::time_deleted.is_null()),
+            dsl::sled
+                .find(sled_id.into_untyped_uuid())
+                .filter(dsl::time_deleted.is_null()),
         )
         .set(dsl::last_used_address.eq(dsl::last_used_address + 1))
         .returning(dsl::last_used_address)
@@ -322,7 +324,7 @@ impl DataStore {
                 e,
                 ErrorHandler::NotFoundByLookup(
                     ResourceType::Sled,
-                    LookupType::ById(sled_id),
+                    LookupType::ById(sled_id.into_untyped_uuid()),
                 ),
             )
         })?;
@@ -1656,6 +1658,8 @@ mod test {
         );
         datastore.sled_upsert(sled2).await.unwrap();
 
+        let sled1_id = SledUuid::from_untyped_uuid(sled1_id);
+        let sled2_id = SledUuid::from_untyped_uuid(sled2_id);
         let ip = datastore.next_ipv6_address(&opctx, sled1_id).await.unwrap();
         let expected_ip = Ipv6Addr::new(0xfd00, 0x1de, 0, 0, 0, 0, 1, 0);
         assert_eq!(ip, expected_ip);
diff --git a/nexus/db-queries/src/db/datastore/ssh_key.rs b/nexus/db-queries/src/db/datastore/ssh_key.rs
index a5f7427267..9e9b6230b7 100644
--- a/nexus/db-queries/src/db/datastore/ssh_key.rs
+++ b/nexus/db-queries/src/db/datastore/ssh_key.rs
@@ -27,6 +27,8 @@ use omicron_common::api::external::LookupType;
 use omicron_common::api::external::NameOrId;
 use omicron_common::api::external::ResourceType;
 use omicron_common::api::external::UpdateResult;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use ref_cast::RefCast;
 use uuid::Uuid;
 
@@ -120,7 +122,7 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         authz_user: &authz::SiloUser,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         keys: &Option<Vec<NameOrId>>,
     ) -> UpdateResult<()> {
         opctx.authorize(authz::Action::ListChildren, authz_user).await?;
@@ -142,7 +144,7 @@ impl DataStore {
                 })?
                 .iter()
                 .map(|key| db::model::InstanceSshKey {
-                    instance_id,
+                    instance_id: instance_id.into_untyped_uuid(),
                     ssh_key_id: *key,
                 })
                 .collect()
@@ -153,7 +155,7 @@ impl DataStore {
             Some(vec) => vec
                 .iter()
                 .map(|key| db::model::InstanceSshKey {
-                    instance_id,
+                    instance_id: instance_id.into_untyped_uuid(),
                     ssh_key_id: *key,
                 })
                 .collect(),
@@ -203,11 +205,11 @@ impl DataStore {
     pub async fn instance_ssh_keys_delete(
         &self,
         opctx: &OpContext,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> DeleteResult {
         use db::schema::instance_ssh_key::dsl;
         diesel::delete(dsl::instance_ssh_key)
-            .filter(dsl::instance_id.eq(instance_id))
+            .filter(dsl::instance_id.eq(instance_id.into_untyped_uuid()))
             .execute_async(&*self.pool_connection_authorized(opctx).await?)
             .await
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs
index 9738f05ff6..247eefd3d5 100644
--- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs
+++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs
@@ -17,6 +17,7 @@ use async_bb8_diesel::AsyncRunQueryDsl;
 use diesel::prelude::*;
 use diesel::result::Error as DieselError;
 use omicron_common::api::external::{DeleteResult, Error};
+use omicron_uuid_kinds::InstanceUuid;
 use uuid::Uuid;
 
 /// The types of resources which can consume storage space.
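
(Aside, not part of the patch: the payoff of changing signatures like `instance_ssh_keys_delete` from `Uuid` to `InstanceUuid` is that mixing up ID kinds becomes a compile error rather than a latent bug. A sketch with a hypothetical `take_instance` helper standing in for any of the datastore methods above:

    use omicron_uuid_kinds::{InstanceUuid, PropolisUuid};

    fn take_instance(_id: InstanceUuid) { /* ... */ }

    fn demo() {
        let instance_id = InstanceUuid::new_v4();
        let propolis_id = PropolisUuid::new_v4();
        take_instance(instance_id); // OK
        // take_instance(propolis_id); // rejected: expected `InstanceUuid`
        let _ = propolis_id;
    })
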
@@ -261,7 +262,7 @@ impl DataStore {
     pub async fn virtual_provisioning_collection_insert_instance(
         &self,
         opctx: &OpContext,
-        id: Uuid,
+        id: InstanceUuid,
         project_id: Uuid,
         cpus_diff: i64,
         ram_diff: ByteCount,
@@ -286,7 +287,7 @@ impl DataStore {
     pub async fn virtual_provisioning_collection_delete_instance(
         &self,
         opctx: &OpContext,
-        id: Uuid,
+        id: InstanceUuid,
         project_id: Uuid,
         cpus_diff: i64,
         ram_diff: ByteCount,
@@ -433,7 +434,7 @@ mod test {
         datastore: &DataStore,
         opctx: &OpContext,
         authz_project: &crate::authz::Project,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         project_id: Uuid,
         cpus: i64,
         memory: ByteCount,
@@ -480,7 +481,7 @@ mod test {
 
         // Actually provision the instance
 
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         let cpus = 12;
         let ram = ByteCount::try_from(1 << 30).unwrap();
@@ -553,7 +554,7 @@ mod test {
 
         // Actually provision the instance
 
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         let cpus = 12;
         let ram = ByteCount::try_from(1 << 30).unwrap();
diff --git a/nexus/db-queries/src/db/datastore/vmm.rs b/nexus/db-queries/src/db/datastore/vmm.rs
index bcb615411e..7ce8c1551e 100644
--- a/nexus/db-queries/src/db/datastore/vmm.rs
+++ b/nexus/db-queries/src/db/datastore/vmm.rs
@@ -27,6 +27,8 @@ use omicron_common::api::external::LookupResult;
 use omicron_common::api::external::LookupType;
 use omicron_common::api::external::ResourceType;
 use omicron_common::api::external::UpdateResult;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::PropolisUuid;
 use std::net::SocketAddr;
 use uuid::Uuid;
 
@@ -52,16 +54,16 @@ impl DataStore {
     pub async fn vmm_mark_deleted(
         &self,
         opctx: &OpContext,
-        vmm_id: &Uuid,
+        vmm_id: &PropolisUuid,
     ) -> UpdateResult<bool> {
         let valid_states = vec![DbVmmState::Destroyed, DbVmmState::Failed];
 
         let updated = diesel::update(dsl::vmm)
-            .filter(dsl::id.eq(*vmm_id))
+            .filter(dsl::id.eq(vmm_id.into_untyped_uuid()))
             .filter(dsl::state.eq_any(valid_states))
             .filter(dsl::time_deleted.is_null())
             .set(dsl::time_deleted.eq(Utc::now()))
-            .check_if_exists::<Vmm>(*vmm_id)
+            .check_if_exists::<Vmm>(vmm_id.into_untyped_uuid())
             .execute_and_check(&*self.pool_connection_authorized(opctx).await?)
             .await
             .map(|r| match r.status {
@@ -73,7 +75,7 @@ impl DataStore {
                     e,
                     ErrorHandler::NotFoundByLookup(
                         ResourceType::Vmm,
-                        LookupType::ById(*vmm_id),
+                        LookupType::ById(vmm_id.into_untyped_uuid()),
                     ),
                 )
             })?;
@@ -85,12 +87,12 @@ impl DataStore {
         &self,
         opctx: &OpContext,
         authz_instance: &authz::Instance,
-        vmm_id: &Uuid,
+        vmm_id: &PropolisUuid,
     ) -> LookupResult<Vmm> {
         opctx.authorize(authz::Action::Read, authz_instance).await?;
 
         let vmm = dsl::vmm
-            .filter(dsl::id.eq(*vmm_id))
+            .filter(dsl::id.eq(vmm_id.into_untyped_uuid()))
             .filter(dsl::instance_id.eq(authz_instance.id()))
             .filter(dsl::time_deleted.is_null())
             .select(Vmm::as_select())
@@ -101,7 +103,7 @@ impl DataStore {
                     e,
                     ErrorHandler::NotFoundByLookup(
                         ResourceType::Vmm,
-                        LookupType::ById(*vmm_id),
+                        LookupType::ById(vmm_id.into_untyped_uuid()),
                     ),
                 )
             })?;
@@ -111,15 +113,15 @@ impl DataStore {
 
     pub async fn vmm_update_runtime(
         &self,
-        vmm_id: &Uuid,
+        vmm_id: &PropolisUuid,
         new_runtime: &VmmRuntimeState,
     ) -> Result<bool, Error> {
         let updated = diesel::update(dsl::vmm)
             .filter(dsl::time_deleted.is_null())
-            .filter(dsl::id.eq(*vmm_id))
+            .filter(dsl::id.eq(vmm_id.into_untyped_uuid()))
             .filter(dsl::state_generation.lt(new_runtime.gen))
             .set(new_runtime.clone())
-            .check_if_exists::<Vmm>(*vmm_id)
+            .check_if_exists::<Vmm>(vmm_id.into_untyped_uuid())
             .execute_and_check(&*self.pool_connection_unauthorized().await?)
             .await
             .map(|r| match r.status {
@@ -131,7 +133,7 @@ impl DataStore {
                     e,
                     ErrorHandler::NotFoundByLookup(
                         ResourceType::Vmm,
-                        LookupType::ById(*vmm_id),
+                        LookupType::ById(vmm_id.into_untyped_uuid()),
                     ),
                 )
             })?;
@@ -150,13 +152,13 @@ impl DataStore {
     pub async fn vmm_overwrite_addr_for_test(
         &self,
         opctx: &OpContext,
-        vmm_id: &Uuid,
+        vmm_id: &PropolisUuid,
         new_addr: SocketAddr,
     ) -> UpdateResult<Vmm> {
         let new_ip = ipnetwork::IpNetwork::from(new_addr.ip());
         let new_port = new_addr.port();
         let vmm = diesel::update(dsl::vmm)
-            .filter(dsl::id.eq(*vmm_id))
+            .filter(dsl::id.eq(vmm_id.into_untyped_uuid()))
             .set((
                 dsl::propolis_ip.eq(new_ip),
                 dsl::propolis_port.eq(i32::from(new_port)),
diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs
index 9e2c2f55e9..f8cfd4789c 100644
--- a/nexus/db-queries/src/db/datastore/volume.rs
+++ b/nexus/db-queries/src/db/datastore/volume.rs
@@ -38,6 +38,8 @@ use omicron_common::api::internal::nexus::DownstairsClientStopRequest;
 use omicron_common::api::internal::nexus::DownstairsClientStopped;
 use omicron_common::api::internal::nexus::RepairProgress;
 use omicron_uuid_kinds::DownstairsKind;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::PropolisUuid;
 use omicron_uuid_kinds::TypedUuid;
 use omicron_uuid_kinds::UpstairsKind;
 use omicron_uuid_kinds::UpstairsRepairKind;
@@ -58,10 +60,10 @@ pub enum VolumeCheckoutReason {
     CopyAndModify,
 
     /// Check out a Volume to send to Propolis to start an instance.
-    InstanceStart { vmm_id: Uuid },
+    InstanceStart { vmm_id: PropolisUuid },
 
     /// Check out a Volume to send to a migration destination Propolis.
-    InstanceMigrate { vmm_id: Uuid, target_vmm_id: Uuid },
+    InstanceMigrate { vmm_id: PropolisUuid, target_vmm_id: PropolisUuid },
 
     /// Check out a Volume to send to a Pantry (for background maintenance
     /// operations).
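
(Aside, not part of the patch: where one side of a comparison is still an untyped database value, as in the volume-checkout hunks that follow, the typed side is downcast explicitly; `TypedUuid` deliberately has no implicit equality with plain `Uuid`. A minimal sketch under the same assumptions:

    use omicron_uuid_kinds::{GenericUuid, PropolisUuid};
    use uuid::Uuid;

    // `db_propolis_id` stands in for a value read from a plain-Uuid column.
    fn is_same_vmm(db_propolis_id: Uuid, vmm_id: PropolisUuid) -> bool {
        db_propolis_id == vmm_id.into_untyped_uuid()
    })
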
@@ -312,7 +314,7 @@ impl DataStore {
                     }
 
                     (Some(propolis_id), None) => {
-                        if propolis_id != *vmm_id {
+                        if propolis_id != vmm_id.into_untyped_uuid() {
                             return Err(VolumeGetError::CheckoutConditionFailed(
                                 format!(
                                     "InstanceStart {}: instance {} propolis id {} mismatch",
@@ -356,7 +358,7 @@ impl DataStore {
                 let runtime = instance.runtime();
                 match (runtime.propolis_id, runtime.dst_propolis_id) {
                     (Some(propolis_id), Some(dst_propolis_id)) => {
-                        if propolis_id != *vmm_id || dst_propolis_id != *target_vmm_id {
+                        if propolis_id != vmm_id.into_untyped_uuid() || dst_propolis_id != target_vmm_id.into_untyped_uuid() {
                             return Err(VolumeGetError::CheckoutConditionFailed(
                                 format!(
                                     "InstanceMigrate {} {}: instance {} propolis id mismatches {} {}",
@@ -385,7 +387,7 @@ impl DataStore {
 
                     (Some(propolis_id), None) => {
                         // XXX is this right?
-                        if propolis_id != *vmm_id {
+                        if propolis_id != vmm_id.into_untyped_uuid() {
                             return Err(VolumeGetError::CheckoutConditionFailed(
                                 format!(
                                     "InstanceMigrate {} {}: instance {} propolis id {} mismatch",
diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index 9bb6a44ea7..b48196bde8 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -895,6 +895,7 @@ mod tests {
     use omicron_test_utils::dev::db::CockroachInstance;
     use omicron_uuid_kinds::ExternalIpUuid;
     use omicron_uuid_kinds::GenericUuid;
+    use omicron_uuid_kinds::InstanceUuid;
     use omicron_uuid_kinds::OmicronZoneUuid;
     use sled_agent_client::ZoneKind;
     use std::net::IpAddr;
@@ -1000,8 +1001,8 @@ mod tests {
                 .expect("Failed to create IP Pool range");
         }
 
-        async fn create_instance(&self, name: &str) -> Uuid {
-            let instance_id = Uuid::new_v4();
+        async fn create_instance(&self, name: &str) -> InstanceUuid {
+            let instance_id = InstanceUuid::new_v4();
             let project_id = Uuid::new_v4();
             let instance = Instance::new(instance_id, project_id, &InstanceCreate {
                 identity: IdentityMetadataCreateParams { name: String::from(name).parse().unwrap(), description: format!("instance {}", name) },
@@ -1062,7 +1063,7 @@ mod tests {
             (0..super::MAX_PORT).step_by(NUM_SOURCE_NAT_PORTS.into())
         {
             let id = Uuid::new_v4();
-            let instance_id = Uuid::new_v4();
+            let instance_id = InstanceUuid::new_v4();
             let ip = context
                 .db_datastore
                 .allocate_instance_snat_ip(
@@ -1079,7 +1080,7 @@ mod tests {
         }
 
         // The next allocation should fail, due to IP exhaustion
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         let err = context
             .db_datastore
             .allocate_instance_snat_ip(
@@ -1212,7 +1213,7 @@ mod tests {
         // Allocate two addresses
         let mut ips = Vec::with_capacity(2);
         for (expected_ip, expected_first_port) in external_ips.clone().take(2) {
-            let instance_id = Uuid::new_v4();
+            let instance_id = InstanceUuid::new_v4();
             let ip = context
                 .db_datastore
                 .allocate_instance_snat_ip(
@@ -1240,7 +1241,7 @@ mod tests {
 
         // Allocate a new one, ensure it's the same as the first one we
         // released.
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         let ip = context
             .db_datastore
             .allocate_instance_snat_ip(
@@ -1267,7 +1268,7 @@ mod tests {
 
         // Allocate one more, ensure it's the next chunk after the second one
         // from the original loop.
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         let ip = context
             .db_datastore
             .allocate_instance_snat_ip(
@@ -1579,7 +1580,7 @@ mod tests {
         context.create_ip_pool("default", range, true).await;
 
         // Create one SNAT IP address.
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         let id = Uuid::new_v4();
         let ip = context
             .db_datastore
diff --git a/nexus/db-queries/src/db/queries/instance.rs b/nexus/db-queries/src/db/queries/instance.rs
index ed584c6ce6..fded585b67 100644
--- a/nexus/db-queries/src/db/queries/instance.rs
+++ b/nexus/db-queries/src/db/queries/instance.rs
@@ -21,6 +21,7 @@ use nexus_db_model::{
 use omicron_common::api::internal::nexus::{
     MigrationRole, MigrationRuntimeState,
 };
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid};
 use uuid::Uuid;
 
 use crate::db::pool::DbConnection;
@@ -154,26 +155,28 @@ where
 impl InstanceAndVmmUpdate {
     pub fn new(
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         new_instance_runtime_state: InstanceRuntimeState,
-        vmm_id: Uuid,
+        vmm_id: PropolisUuid,
         new_vmm_runtime_state: VmmRuntimeState,
         migration: Option<MigrationRuntimeState>,
     ) -> Self {
         let instance_find = Box::new(
             instance_dsl::instance
-                .filter(instance_dsl::id.eq(instance_id))
+                .filter(instance_dsl::id.eq(instance_id.into_untyped_uuid()))
                 .select(instance_dsl::id),
         );
 
         let vmm_find = Box::new(
-            vmm_dsl::vmm.filter(vmm_dsl::id.eq(vmm_id)).select(vmm_dsl::id),
+            vmm_dsl::vmm
+                .filter(vmm_dsl::id.eq(vmm_id.into_untyped_uuid()))
+                .select(vmm_dsl::id),
         );
 
         let instance_update = Box::new(
             diesel::update(instance_dsl::instance)
                 .filter(instance_dsl::time_deleted.is_null())
-                .filter(instance_dsl::id.eq(instance_id))
+                .filter(instance_dsl::id.eq(instance_id.into_untyped_uuid()))
                 .filter(
                     instance_dsl::state_generation
                         .lt(new_instance_runtime_state.gen),
@@ -184,7 +187,7 @@ impl InstanceAndVmmUpdate {
         let vmm_update = Box::new(
             diesel::update(vmm_dsl::vmm)
                 .filter(vmm_dsl::time_deleted.is_null())
-                .filter(vmm_dsl::id.eq(vmm_id))
+                .filter(vmm_dsl::id.eq(vmm_id.into_untyped_uuid()))
                 .filter(vmm_dsl::state_generation.lt(new_vmm_runtime_state.gen))
                 .set(new_vmm_runtime_state),
         );
@@ -210,7 +213,8 @@ impl InstanceAndVmmUpdate {
                     diesel::update(migration_dsl::migration)
                         .filter(migration_dsl::id.eq(migration_id))
                         .filter(
-                            migration_dsl::target_propolis_id.eq(vmm_id),
+                            migration_dsl::target_propolis_id
+                                .eq(vmm_id.into_untyped_uuid()),
                         )
                         .filter(migration_dsl::target_gen.lt(gen))
                         .set((
@@ -223,7 +227,8 @@ impl InstanceAndVmmUpdate {
                     diesel::update(migration_dsl::migration)
                         .filter(migration_dsl::id.eq(migration_id))
                         .filter(
-                            migration_dsl::source_propolis_id.eq(vmm_id),
+                            migration_dsl::source_propolis_id
+                                .eq(vmm_id.into_untyped_uuid()),
                         )
                         .filter(migration_dsl::source_gen.lt(gen))
                         .set((
diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs
index e7ce4ca61a..3a5648cd86 100644
--- a/nexus/db-queries/src/db/queries/network_interface.rs
+++ b/nexus/db-queries/src/db/queries/network_interface.rs
@@ -1873,6 +1873,8 @@ mod tests {
     use omicron_common::api::external::MacAddr;
     use omicron_test_utils::dev;
     use omicron_test_utils::dev::db::CockroachInstance;
+    use omicron_uuid_kinds::GenericUuid;
+    use omicron_uuid_kinds::InstanceUuid;
     use oxnet::Ipv4Net;
     use oxnet::Ipv6Net;
     use std::collections::HashSet;
@@ -1890,7 +1892,7 @@ mod tests {
         project_id: Uuid,
         db_datastore: &DataStore,
     ) -> Instance {
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();
         // Use the first chunk of the UUID as the name, to avoid conflicts.
         // Start with a lower ascii character to satisfy the name constraints.
         let name = format!("a{}", instance_id)[..9].parse().unwrap();
@@ -1950,7 +1952,10 @@ mod tests {
             ..instance.runtime_state.clone()
         };
         let res = db_datastore
-            .instance_update_runtime(&instance.id(), &new_runtime)
+            .instance_update_runtime(
+                &InstanceUuid::from_untyped_uuid(instance.id()),
+                &new_runtime,
+            )
             .await;
         assert!(matches!(res, Ok(true)), "Failed to change instance state");
         instance.runtime_state = new_runtime;
@@ -2177,7 +2182,7 @@ mod tests {
         let context =
             TestContext::new("test_insert_running_instance_fails", 2).await;
         let instance = context.create_running_instance().await;
-        let instance_id = instance.id();
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let requested_ip = "172.30.0.5".parse().unwrap();
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
@@ -2206,7 +2211,7 @@ mod tests {
     async fn test_insert_request_exact_ip() {
         let context = TestContext::new("test_insert_request_exact_ip", 2).await;
         let instance = context.create_stopped_instance().await;
-        let instance_id = instance.id();
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let requested_ip = "172.30.0.5".parse().unwrap();
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
@@ -2242,7 +2247,7 @@ mod tests {
             TestContext::new("test_insert_no_instance_fails", 2).await;
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            Uuid::new_v4(),
+            InstanceUuid::new_v4(),
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-b".parse().unwrap(),
@@ -2277,9 +2282,10 @@ mod tests {
 
         for (i, expected_address) in addresses.take(2).enumerate() {
             let instance = context.create_stopped_instance().await;
+            let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
             let interface = IncompleteNetworkInterface::new_instance(
                 Uuid::new_v4(),
-                instance.id(),
+                instance_id,
                 context.net1.subnets[0].clone(),
                 IdentityMetadataCreateParams {
                     name: format!("interface-{}", i).parse().unwrap(),
@@ -2315,12 +2321,15 @@ mod tests {
             TestContext::new("test_insert_request_same_ip_fails", 2).await;
 
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let new_instance = context.create_stopped_instance().await;
+        let new_instance_id =
+            InstanceUuid::from_untyped_uuid(new_instance.id());
 
         // Insert an interface on the first instance.
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2339,7 +2348,7 @@ mod tests {
         // other parameters are valid.
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            new_instance.id(),
+            new_instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2575,9 +2584,10 @@ mod tests {
         let context =
             TestContext::new("test_insert_with_duplicate_name_fails", 2).await;
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2596,7 +2606,7 @@ mod tests {
             .expect("Failed to insert interface");
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[1].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2624,9 +2634,10 @@ mod tests {
         let context =
             TestContext::new("test_insert_same_vpc_subnet_fails", 2).await;
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2642,7 +2653,7 @@ mod tests {
             .expect("Failed to insert interface");
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-d".parse().unwrap(),
@@ -2667,9 +2678,10 @@ mod tests {
         let context =
             TestContext::new("test_insert_same_interface_fails", 2).await;
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2708,9 +2720,10 @@ mod tests {
         let context =
             TestContext::new("test_insert_multiple_vpcs_fails", 2).await;
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-c".parse().unwrap(),
@@ -2728,7 +2741,7 @@ mod tests {
         for addr in [Some(expected_address), None] {
             let interface = IncompleteNetworkInterface::new_instance(
                 Uuid::new_v4(),
-                instance.id(),
+                instance_id,
                 context.net2.subnets[0].clone(),
                 IdentityMetadataCreateParams {
                     name: "interface-a".parse().unwrap(),
@@ -2761,9 +2774,10 @@ mod tests {
         let n_interfaces = context.net1.available_ipv4_addresses()[0];
         for _ in 0..n_interfaces {
             let instance = context.create_stopped_instance().await;
+            let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
             let interface = IncompleteNetworkInterface::new_instance(
                 Uuid::new_v4(),
-                instance.id(),
+                instance_id,
                 context.net1.subnets[0].clone(),
                 IdentityMetadataCreateParams {
                     name: "interface-c".parse().unwrap(),
@@ -2789,9 +2803,10 @@ mod tests {
             &context.db_datastore,
         )
         .await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets[0].clone(),
             IdentityMetadataCreateParams {
                 name: "interface-d".parse().unwrap(),
@@ -2819,10 +2834,11 @@ mod tests {
             TestContext::new("test_insert_multiple_vpc_subnets_succeeds", 2)
                 .await;
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         for (i, subnet) in context.net1.subnets.iter().enumerate() {
             let interface = IncompleteNetworkInterface::new_instance(
                 Uuid::new_v4(),
-                instance.id(),
+                instance_id,
                 subnet.clone(),
                 IdentityMetadataCreateParams {
                     name: format!("if{}", i).parse().unwrap(),
@@ -2885,11 +2901,12 @@ mod tests {
         )
         .await;
         let instance = context.create_stopped_instance().await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         for slot in 0..MAX_NICS_PER_INSTANCE {
             let subnet = &context.net1.subnets[slot];
             let interface = IncompleteNetworkInterface::new_instance(
                 Uuid::new_v4(),
-                instance.id(),
+                instance_id,
                 subnet.clone(),
                 IdentityMetadataCreateParams {
                     name: format!("interface-{}", slot).parse().unwrap(),
@@ -2924,7 +2941,7 @@ mod tests {
         // The next one should fail
         let interface = IncompleteNetworkInterface::new_instance(
             Uuid::new_v4(),
-            instance.id(),
+            instance_id,
             context.net1.subnets.last().unwrap().clone(),
             IdentityMetadataCreateParams {
                 name: "interface-8".parse().unwrap(),
diff --git a/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs
index b3c1a569b0..fd86912107 100644
--- a/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs
+++ b/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs
@@ -19,6 +19,8 @@ use diesel::result::Error as DieselError;
 use diesel::sql_types;
 use omicron_common::api::external;
 use omicron_common::api::external::MessagePair;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 
 type AllColumnsOfVirtualResource =
     AllColumnsOf<virtual_provisioning_resource::table>;
@@ -458,13 +460,13 @@ FROM
     }
 
     pub fn new_insert_instance(
-        id: uuid::Uuid,
+        id: InstanceUuid,
         cpus_diff: i64,
         ram_diff: ByteCount,
         project_id: uuid::Uuid,
     ) -> TypedSqlQuery<SelectableSql<VirtualProvisioningCollection>> {
         let mut provision = VirtualProvisioningResource::new(
-            id,
+            id.into_untyped_uuid(),
             ResourceTypeProvisioned::Instance,
         );
         provision.cpus_provisioned = cpus_diff;
@@ -474,7 +476,7 @@ FROM
     }
 
     pub fn new_delete_instance(
-        id: uuid::Uuid,
+        id: InstanceUuid,
         max_instance_gen: i64,
         cpus_diff: i64,
         ram_diff: ByteCount,
@@ -482,7 +484,7 @@ FROM
     ) -> TypedSqlQuery<SelectableSql<VirtualProvisioningCollection>> {
         Self::apply_update(
             UpdateKind::DeleteInstance {
-                id,
+                id: id.into_untyped_uuid(),
                 max_instance_gen,
                 cpus_diff,
                 ram_diff,
@@ -544,7 +546,7 @@ mod test {
 
     #[tokio::test]
     async fn expectorate_query_insert_instance() {
-        let id = Uuid::nil();
+        let id = InstanceUuid::nil();
         let project_id = Uuid::nil();
         let cpus_diff = 4;
         let ram_diff = 2048.try_into().unwrap();
@@ -561,7 +563,7 @@ mod test {
 
     #[tokio::test]
     async fn expectorate_query_delete_instance() {
-        let id = Uuid::nil();
+        let id = InstanceUuid::nil();
         let project_id = Uuid::nil();
         let cpus_diff = 4;
         let ram_diff = 2048.try_into().unwrap();
@@ -649,7 +651,7 @@ mod test {
         let pool = crate::db::Pool::new(&logctx.log, &cfg);
         let conn = pool.pool().get().await.unwrap();
 
-        let id = Uuid::nil();
+        let id = InstanceUuid::nil();
         let project_id = Uuid::nil();
         let cpus_diff = 16.try_into().unwrap();
         let ram_diff = 2048.try_into().unwrap();
@@ -675,7 +677,7 @@ mod test {
         let pool = crate::db::Pool::new(&logctx.log, &cfg);
         let conn = pool.pool().get().await.unwrap();
 
-        let id = Uuid::nil();
+        let id = InstanceUuid::nil();
         let max_instance_gen = 0;
         let project_id = Uuid::nil();
         let cpus_diff = 16.try_into().unwrap();
diff --git a/nexus/src/app/background/abandoned_vmm_reaper.rs b/nexus/src/app/background/abandoned_vmm_reaper.rs
index 4685012e28..3883185d9f 100644
--- a/nexus/src/app/background/abandoned_vmm_reaper.rs
+++ b/nexus/src/app/background/abandoned_vmm_reaper.rs
@@ -39,6 +39,7 @@ use nexus_db_model::Vmm;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db::pagination::Paginator;
 use nexus_db_queries::db::DataStore;
+use omicron_uuid_kinds::{GenericUuid, PropolisUuid};
 use std::num::NonZeroU32;
 use std::sync::Arc;
 
@@ -108,10 +109,14 @@ impl AbandonedVmmReaper {
         slog::debug!(opctx.log, "Found abandoned VMMs"; "count" => vmms.len());
 
         for vmm in vmms {
-            let vmm_id = vmm.id;
+            let vmm_id = PropolisUuid::from_untyped_uuid(vmm.id);
             slog::trace!(opctx.log, "Deleting abandoned VMM"; "vmm" => %vmm_id);
             // Attempt to remove the abandoned VMM's sled resource reservation.
-            match self.datastore.sled_reservation_delete(opctx, vmm_id).await {
+            match self
+                .datastore
+                .sled_reservation_delete(opctx, vmm_id.into_untyped_uuid())
+                .await
+            {
                 Ok(_) => {
                     slog::trace!(
                         opctx.log,
@@ -235,7 +240,7 @@ mod tests {
     const PROJECT_NAME: &str = "carcosa";
 
     struct TestFixture {
-        destroyed_vmm_id: Uuid,
+        destroyed_vmm_id: PropolisUuid,
     }
 
     impl TestFixture {
@@ -255,12 +260,12 @@ mod tests {
             )
             .await;
 
-            let destroyed_vmm_id = Uuid::new_v4();
+            let destroyed_vmm_id = PropolisUuid::new_v4();
             datastore
                 .vmm_insert(
                     &opctx,
                     dbg!(Vmm {
-                        id: destroyed_vmm_id,
+                        id: destroyed_vmm_id.into_untyped_uuid(),
                         time_created: Utc::now(),
                         time_deleted: None,
                        instance_id: instance.identity.id,
@@ -287,7 +292,7 @@ mod tests {
             dbg!(datastore
                 .sled_reservation_create(
                     &opctx,
-                    destroyed_vmm_id,
+                    destroyed_vmm_id.into_untyped_uuid(),
                     SledResourceKind::Instance,
                     resources.clone(),
                     constraints,
@@ -308,7 +313,9 @@ mod tests {
             let conn = datastore.pool_connection_for_tests().await.unwrap();
 
             let fetched_vmm = vmm_dsl::vmm
-                .filter(vmm_dsl::id.eq(self.destroyed_vmm_id))
+                .filter(
+                    vmm_dsl::id.eq(self.destroyed_vmm_id.into_untyped_uuid()),
+                )
                 .filter(vmm_dsl::time_deleted.is_null())
                 .select(Vmm::as_select())
                 .first_async::<Vmm>(&*conn)
                 .await
@@ -321,7 +328,10 @@ mod tests {
             );
 
             let fetched_sled_resource = sled_resource_dsl::sled_resource
-                .filter(sled_resource_dsl::id.eq(self.destroyed_vmm_id))
+                .filter(
+                    sled_resource_dsl::id
+                        .eq(self.destroyed_vmm_id.into_untyped_uuid()),
+                )
                 .select(SledResource::as_select())
                 .first_async::<SledResource>(&*conn)
                 .await
@@ -439,7 +449,10 @@ mod tests {
         assert!(!abandoned_vmms.is_empty());
 
         datastore
-            .sled_reservation_delete(&opctx, fixture.destroyed_vmm_id)
+            .sled_reservation_delete(
+                &opctx,
+                fixture.destroyed_vmm_id.into_untyped_uuid(),
+            )
             .await
             .expect(
                 "simulate another nexus marking the sled reservation deleted",
diff --git a/nexus/src/app/background/instance_watcher.rs b/nexus/src/app/background/instance_watcher.rs
index c4eda68594..1b10605c5e 100644
--- a/nexus/src/app/background/instance_watcher.rs
+++ b/nexus/src/app/background/instance_watcher.rs
@@ -18,6 +18,8 @@ use nexus_types::identity::Asset;
 use nexus_types::identity::Resource;
 use omicron_common::api::external::InstanceState;
 use omicron_common::api::internal::nexus::SledInstanceState;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use oximeter::types::ProducerRegistry;
 use sled_agent_client::Client as SledAgentClient;
 use std::borrow::Cow;
@@ -79,7 +81,7 @@ impl InstanceWatcher {
         async move {
             slog::trace!(opctx.log, "checking on instance...");
-            let rsp = client.instance_get_state(&target.instance_id).await;
+            let rsp = client
+                .instance_get_state(&InstanceUuid::from_untyped_uuid(
+                    target.instance_id,
+                ))
+                .await;
             let mut check =
                 Check { target, outcome: Default::default(), result: Ok(()) };
             let state = match rsp {
@@ -154,7 +160,7 @@ impl InstanceWatcher {
                     &opctx,
                     &opctx,
                     &opctx.log,
-                    &target.instance_id,
+                    &InstanceUuid::from_untyped_uuid(target.instance_id),
                     &new_runtime_state,
                     v2p_notification_tx,
                 )
diff --git a/nexus/src/app/external_ip.rs b/nexus/src/app/external_ip.rs
index 62b8c4cbd6..06dfc7732f 100644
--- a/nexus/src/app/external_ip.rs
+++ b/nexus/src/app/external_ip.rs
@@ -24,6 +24,8 @@ use omicron_common::api::external::ListResultVec;
 use omicron_common::api::external::LookupResult;
 use omicron_common::api::external::NameOrId;
 use omicron_common::api::external::UpdateResult;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 
 impl super::Nexus {
     pub(crate) async fn instance_list_external_ips(
@@ -35,7 +37,10 @@ impl super::Nexus {
             instance_lookup.lookup_for(authz::Action::Read).await?;
         Ok(self
             .db_datastore
-            .instance_lookup_external_ips(opctx, authz_instance.id())
+            .instance_lookup_external_ips(
+                opctx,
+                InstanceUuid::from_untyped_uuid(authz_instance.id()),
+            )
             .await?
             .into_iter()
             .filter_map(|ip| {
diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs
index 943665cab3..517fbf218a 100644
--- a/nexus/src/app/instance.rs
+++ b/nexus/src/app/instance.rs
@@ -47,6 +47,10 @@ use omicron_common::api::external::UpdateResult;
 use omicron_common::api::internal::nexus;
 use omicron_common::api::internal::nexus::VmmState;
 use omicron_common::api::internal::shared::SourceNatConfig;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
+use omicron_uuid_kinds::PropolisUuid;
+use omicron_uuid_kinds::SledUuid;
 use propolis_client::support::tungstenite::protocol::frame::coding::CloseCode;
 use propolis_client::support::tungstenite::protocol::CloseFrame;
 use propolis_client::support::tungstenite::Message as WebSocketMessage;
@@ -174,14 +178,14 @@ enum InstanceStateChangeRequestAction {
 
     /// Request the appropriate state change from the sled with the specified
     /// UUID.
-    SendToSled(Uuid),
+    SendToSled(SledUuid),
 }
 
 /// What is the higher level operation that is calling
 /// `instance_ensure_registered`?
 pub(crate) enum InstanceRegisterReason {
-    Start { vmm_id: Uuid },
-    Migrate { vmm_id: Uuid, target_vmm_id: Uuid },
+    Start { vmm_id: PropolisUuid },
+    Migrate { vmm_id: PropolisUuid, target_vmm_id: PropolisUuid },
 }
 
 impl super::Nexus {
@@ -534,8 +538,8 @@ impl super::Nexus {
     pub(crate) async fn instance_set_migration_ids(
         &self,
         opctx: &OpContext,
-        instance_id: Uuid,
-        sled_id: Uuid,
+        instance_id: InstanceUuid,
+        sled_id: SledUuid,
         prev_instance_runtime: &db::model::InstanceRuntimeState,
         migration_params: InstanceMigrationSourceParams,
     ) -> UpdateResult<db::model::Instance> {
         assert!(prev_instance_runtime.migration_id.is_none());
         assert!(prev_instance_runtime.dst_propolis_id.is_none());
 
         let (.., authz_instance) = LookupPath::new(opctx, &self.db_datastore)
-            .instance_id(instance_id)
+            .instance_id(instance_id.into_untyped_uuid())
             .lookup_for(authz::Action::Modify)
             .await?;
 
@@ -613,8 +617,8 @@ impl super::Nexus {
     /// ID set.
     pub(crate) async fn instance_clear_migration_ids(
         &self,
-        instance_id: Uuid,
-        sled_id: Uuid,
+        instance_id: InstanceUuid,
+        sled_id: SledUuid,
         prev_instance_runtime: &db::model::InstanceRuntimeState,
     ) -> Result<(), Error> {
         assert!(prev_instance_runtime.migration_id.is_some());
@@ -682,7 +686,9 @@ impl super::Nexus {
                 if inner.instance_unhealthy() {
                     let _ = self
                         .mark_instance_failed(
-                            &authz_instance.id(),
+                            &InstanceUuid::from_untyped_uuid(
+                                authz_instance.id(),
+                            ),
                             state.instance().runtime(),
                             inner,
                         )
@@ -786,7 +792,9 @@ impl super::Nexus {
                 if inner.instance_unhealthy() {
                     let _ = self
                         .mark_instance_failed(
-                            &authz_instance.id(),
+                            &InstanceUuid::from_untyped_uuid(
+                                authz_instance.id(),
+                            ),
                             state.instance().runtime(),
                             inner,
                         )
@@ -807,19 +815,19 @@ impl super::Nexus {
         &self,
         opctx: &OpContext,
         authz_instance: &authz::Instance,
-        sled_id: &Uuid,
+        sled_id: &SledUuid,
     ) -> Result<Option<nexus::SledInstanceState>, InstanceStateChangeError> {
         opctx.authorize(authz::Action::Modify, authz_instance).await?;
 
         let sa = self.sled_client(&sled_id).await?;
-        sa.instance_unregister(&authz_instance.id())
-            .await
-            .map(|res| res.into_inner().updated_runtime.map(Into::into))
-            .map_err(|e| {
-                InstanceStateChangeError::SledAgent(SledAgentInstancePutError(
-                    e,
-                ))
-            })
+        sa.instance_unregister(&InstanceUuid::from_untyped_uuid(
+            authz_instance.id(),
+        ))
+        .await
+        .map(|res| res.into_inner().updated_runtime.map(Into::into))
+        .map_err(|e| {
+            InstanceStateChangeError::SledAgent(SledAgentInstancePutError(e))
+        })
     }
 
     /// Determines the action to take on an instance's active VMM given a
@@ -855,7 +863,7 @@ impl super::Nexus {
         // instance's current sled agent. If there is none, the request needs to
        // be handled specially based on its type.
         let sled_id = if let Some(vmm) = vmm_state {
-            vmm.sled_id
+            SledUuid::from_untyped_uuid(vmm.sled_id)
         } else {
             match effective_state {
                 // If there's no active sled because the instance is stopped,
@@ -974,7 +982,7 @@ impl super::Nexus {
         requested: InstanceStateChangeRequest,
     ) -> Result<(), InstanceStateChangeError> {
         opctx.authorize(authz::Action::Modify, authz_instance).await?;
-        let instance_id = authz_instance.id();
+        let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id());
 
         match self.select_runtime_change_action(
             prev_instance_state,
@@ -1021,7 +1029,7 @@ impl super::Nexus {
         opctx: &OpContext,
         authz_instance: &authz::Instance,
         db_instance: &db::model::Instance,
-        propolis_id: &Uuid,
+        propolis_id: &PropolisUuid,
         initial_vmm: &db::model::Vmm,
         operation: InstanceRegisterReason,
     ) -> Result<(), Error> {
@@ -1111,7 +1119,7 @@ impl super::Nexus {
         // Collect the external IPs for the instance.
         let (snat_ip, external_ips): (Vec<_>, Vec<_>) = self
             .db_datastore
-            .instance_lookup_external_ips(&opctx, authz_instance.id())
+            .instance_lookup_external_ips(
+                &opctx,
+                InstanceUuid::from_untyped_uuid(authz_instance.id()),
+            )
             .await?
.into_iter() .partition(|ip| ip.kind == IpKind::SNat); @@ -1264,10 +1275,13 @@ impl super::Nexus { )), }; - let sa = self.sled_client(&initial_vmm.sled_id).await?; + let instance_id = InstanceUuid::from_untyped_uuid(db_instance.id()); + let sa = self + .sled_client(&SledUuid::from_untyped_uuid(initial_vmm.sled_id)) + .await?; let instance_register_result = sa .instance_register( - &db_instance.id(), + &instance_id, &sled_agent_client::types::InstanceEnsureBody { hardware: instance_hardware, instance_runtime: db_instance.runtime().clone().into(), @@ -1287,14 +1301,13 @@ impl super::Nexus { match instance_register_result { Ok(state) => { - self.write_returned_instance_state(&db_instance.id(), state) - .await?; + self.write_returned_instance_state(&instance_id, state).await?; } Err(e) => { if e.instance_unhealthy() { let _ = self .mark_instance_failed( - &db_instance.id(), + &instance_id, db_instance.runtime(), &e, ) @@ -1322,7 +1335,7 @@ impl super::Nexus { /// owing to an outdated generation number) will return `Ok`. async fn write_returned_instance_state( &self, - instance_id: &Uuid, + instance_id: &InstanceUuid, state: Option, ) -> Result { slog::debug!(&self.log, @@ -1365,7 +1378,7 @@ impl super::Nexus { /// agent instance API, supplied in `reason`. pub(crate) async fn mark_instance_failed( &self, - instance_id: &Uuid, + instance_id: &InstanceUuid, prev_instance_runtime: &db::model::InstanceRuntimeState, reason: &SledAgentInstancePutError, ) -> Result<(), Error> { @@ -1523,7 +1536,7 @@ impl super::Nexus { pub(crate) async fn notify_instance_updated( &self, opctx: &OpContext, - instance_id: &Uuid, + instance_id: &InstanceUuid, new_runtime_state: &nexus::SledInstanceState, ) -> Result<(), Error> { notify_instance_updated( @@ -1973,7 +1986,7 @@ pub(crate) async fn notify_instance_updated( opctx_alloc: &OpContext, opctx: &OpContext, log: &slog::Logger, - instance_id: &Uuid, + instance_id: &InstanceUuid, new_runtime_state: &nexus::SledInstanceState, v2p_notification_tx: tokio::sync::watch::Sender<()>, ) -> Result, Error> { @@ -1989,7 +2002,7 @@ pub(crate) async fn notify_instance_updated( // Grab the current state of the instance in the DB to reason about // whether this update is stale or not. let (.., authz_instance, db_instance) = LookupPath::new(&opctx, &datastore) - .instance_id(*instance_id) + .instance_id(instance_id.into_untyped_uuid()) .fetch() .await?; @@ -2054,8 +2067,13 @@ pub(crate) async fn notify_instance_updated( // an instance's state changes. // // Tracked in https://github.com/oxidecomputer/omicron/issues/3742. - super::oximeter::unassign_producer(datastore, log, opctx, instance_id) - .await?; + super::oximeter::unassign_producer( + datastore, + log, + opctx, + &instance_id.into_untyped_uuid(), + ) + .await?; } // Write the new instance and VMM states back to CRDB. This needs to be @@ -2136,7 +2154,9 @@ pub(crate) async fn notify_instance_updated( "instance_id" => %instance_id, "propolis_id" => %propolis_id); - datastore.sled_reservation_delete(opctx, propolis_id).await?; + datastore + .sled_reservation_delete(opctx, propolis_id.into_untyped_uuid()) + .await?; if !datastore.vmm_mark_deleted(opctx, &propolis_id).await? 
{ warn!(log, "failed to mark vmm record as deleted"; diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index 3c607bae78..b1ad2bb6fc 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -20,6 +20,8 @@ use omicron_common::api::external::Error; use omicron_common::api::internal::nexus; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::SwitchLocation; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; use oxnet::Ipv4Net; use oxnet::Ipv6Net; use std::collections::HashSet; @@ -63,7 +65,7 @@ impl super::Nexus { pub(crate) async fn instance_ensure_dpd_config( &self, opctx: &OpContext, - instance_id: Uuid, + instance_id: InstanceUuid, sled_ip_address: &std::net::SocketAddrV6, ip_filter: Option, ) -> Result, Error> { @@ -266,7 +268,7 @@ pub(crate) async fn ensure_updated_instance_network_config( new_instance_state: &nexus::InstanceRuntimeState, v2p_notification_tx: tokio::sync::watch::Sender<()>, ) -> Result<(), Error> { - let instance_id = authz_instance.id(); + let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id()); // If this instance update is stale, do nothing, since the superseding // update may have allowed the instance's location to change further. @@ -335,7 +337,8 @@ pub(crate) async fn ensure_updated_instance_network_config( let migration_retired = prev_instance_state.migration_id.is_some() && new_instance_state.migration_id.is_none(); - if (prev_instance_state.propolis_id == new_instance_state.propolis_id) + if (prev_instance_state.propolis_id + == new_instance_state.propolis_id.map(GenericUuid::into_untyped_uuid)) && !migration_retired { debug!(log, "instance didn't move, won't touch network config"; @@ -438,7 +441,7 @@ pub(crate) async fn instance_ensure_dpd_config( resolver: &internal_dns::resolver::Resolver, opctx: &OpContext, opctx_alloc: &OpContext, - instance_id: Uuid, + instance_id: InstanceUuid, sled_ip_address: &std::net::SocketAddrV6, ip_filter: Option, ) -> Result, Error> { @@ -446,7 +449,7 @@ pub(crate) async fn instance_ensure_dpd_config( "instance_id" => %instance_id); let (.., authz_instance) = LookupPath::new(opctx, datastore) - .instance_id(instance_id) + .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::ListChildren) .await?; @@ -724,7 +727,7 @@ async fn clear_instance_networking_state( log, resolver, opctx_alloc, - Some(authz_instance.id()), + Some(InstanceUuid::from_untyped_uuid(authz_instance.id())), true, ) .await @@ -760,7 +763,7 @@ pub(crate) async fn instance_delete_dpd_config( opctx_alloc: &OpContext, authz_instance: &authz::Instance, ) -> Result<(), Error> { - let instance_id = authz_instance.id(); + let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id()); info!(log, "deleting instance dpd configuration"; "instance_id" => %instance_id); @@ -949,7 +952,7 @@ async fn notify_dendrite_nat_state( log: &slog::Logger, resolver: &internal_dns::resolver::Resolver, opctx_alloc: &OpContext, - instance_id: Option, + instance_id: Option, fail_fast: bool, ) -> Result<(), Error> { // Querying boundary switches also requires fleet access and the use of the diff --git a/nexus/src/app/network_interface.rs b/nexus/src/app/network_interface.rs index d1fa87073e..fef8e9d1ac 100644 --- a/nexus/src/app/network_interface.rs +++ b/nexus/src/app/network_interface.rs @@ -11,6 +11,8 @@ use omicron_common::api::external::NameOrId; use 
diff --git a/nexus/src/app/network_interface.rs b/nexus/src/app/network_interface.rs
index d1fa87073e..fef8e9d1ac 100644
--- a/nexus/src/app/network_interface.rs
+++ b/nexus/src/app/network_interface.rs
@@ -11,6 +11,8 @@ use omicron_common::api::external::NameOrId;
 use omicron_common::api::external::http_pagination::PaginatedBy;
 use omicron_common::api::external::UpdateResult;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use uuid::Uuid;

 use nexus_db_queries::authz;
@@ -87,7 +89,7 @@ impl super::Nexus {
         let interface_id = Uuid::new_v4();
         let interface = db::model::IncompleteNetworkInterface::new_instance(
             interface_id,
-            authz_instance.id(),
+            InstanceUuid::from_untyped_uuid(authz_instance.id()),
             db_subnet,
             params.identity.clone(),
             params.ip,
diff --git a/nexus/src/app/sagas/instance_common.rs b/nexus/src/app/sagas/instance_common.rs
index 14263df0ff..6e431aaca7 100644
--- a/nexus/src/app/sagas/instance_common.rs
+++ b/nexus/src/app/sagas/instance_common.rs
@@ -16,9 +16,9 @@ use nexus_db_queries::authz;
 use nexus_db_queries::db::lookup::LookupPath;
 use nexus_db_queries::{authn, context::OpContext, db, db::DataStore};
 use omicron_common::api::external::{Error, NameOrId};
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, SledUuid};
 use serde::{Deserialize, Serialize};
 use steno::ActionError;
-use uuid::Uuid;

 use super::NexusActionContext;

@@ -33,7 +33,7 @@ const DEFAULT_PROPOLIS_PORT: u16 = 12400;
 /// `propolis_id`.
 pub async fn reserve_vmm_resources(
     nexus: &Nexus,
-    propolis_id: Uuid,
+    propolis_id: PropolisUuid,
     ncpus: u32,
     guest_memory: ByteCount,
     constraints: SledReservationConstraints,
@@ -68,7 +68,7 @@ pub async fn reserve_vmm_resources(
     let resource = nexus
         .reserve_on_random_sled(
-            propolis_id,
+            propolis_id.into_untyped_uuid(),
             nexus_db_model::SledResourceKind::Instance,
             resources,
             constraints,
@@ -88,9 +88,9 @@ pub async fn reserve_vmm_resources(
 pub async fn create_and_insert_vmm_record(
     datastore: &DataStore,
     opctx: &OpContext,
-    instance_id: Uuid,
-    propolis_id: Uuid,
-    sled_id: Uuid,
+    instance_id: InstanceUuid,
+    propolis_id: PropolisUuid,
+    sled_id: SledUuid,
     propolis_ip: Ipv6Addr,
     initial_state: nexus_db_model::VmmInitialState,
 ) -> Result<db::model::Vmm, ActionError> {
@@ -131,8 +131,9 @@ pub async fn unwind_vmm_record(
         gen: prev_record.runtime.gen.next().into(),
     };

-    datastore.vmm_update_runtime(&prev_record.id, &new_runtime).await?;
-    datastore.vmm_mark_deleted(&opctx, &prev_record.id).await?;
+    let prev_id = PropolisUuid::from_untyped_uuid(prev_record.id);
+    datastore.vmm_update_runtime(&prev_id, &new_runtime).await?;
+    datastore.vmm_mark_deleted(&opctx, &prev_id).await?;
     Ok(())
 }

@@ -141,7 +142,7 @@ pub async fn unwind_vmm_record(
 pub(super) async fn allocate_vmm_ipv6(
     opctx: &OpContext,
     datastore: &DataStore,
-    sled_uuid: Uuid,
+    sled_uuid: SledUuid,
 ) -> Result<Ipv6Addr, ActionError> {
     datastore
         .next_ipv6_address(opctx, sled_uuid)
@@ -217,7 +218,7 @@ pub async fn instance_ip_get_instance_state(
     serialized_authn: &authn::saga::Serialized,
     authz_instance: &authz::Instance,
     verb: &str,
-) -> Result<Option<Uuid>, ActionError> {
+) -> Result<Option<SledUuid>, ActionError> {
     // XXX: we can get instance state (but not sled ID) in same transaction
     // as attach (but not detach) wth current design. We need to re-query
     // for sled ID anyhow, so keep consistent between attach/detach.
@@ -351,7 +352,7 @@ pub async fn instance_ip_add_nat(
     sagactx: &NexusActionContext,
     serialized_authn: &authn::saga::Serialized,
     authz_instance: &authz::Instance,
-    sled_uuid: Option<Uuid>,
+    sled_uuid: Option<SledUuid>,
     target_ip: ModifyStateForExternalIp,
 ) -> Result<Option<Ipv4NatEntry>, ActionError> {
     let osagactx = sagactx.user_data();
@@ -376,7 +377,7 @@ pub async fn instance_ip_add_nat(
     // Querying sleds requires fleet access; use the instance allocator context
     // for this.
     let (.., sled) = LookupPath::new(&osagactx.nexus().opctx_alloc, &datastore)
-        .sled_id(sled_uuid)
+        .sled_id(sled_uuid.into_untyped_uuid())
         .fetch()
         .await
         .map_err(ActionError::action_failed)?;
@@ -385,7 +386,7 @@ pub async fn instance_ip_add_nat(
         .nexus()
         .instance_ensure_dpd_config(
             &opctx,
-            authz_instance.id(),
+            InstanceUuid::from_untyped_uuid(authz_instance.id()),
             &sled.address(),
             Some(target_ip.id),
         )
@@ -407,7 +408,7 @@ pub async fn instance_ip_remove_nat(
 pub async fn instance_ip_remove_nat(
     sagactx: &NexusActionContext,
     serialized_authn: &authn::saga::Serialized,
-    sled_uuid: Option<Uuid>,
+    sled_uuid: Option<SledUuid>,
     target_ip: ModifyStateForExternalIp,
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
@@ -445,7 +446,7 @@ pub async fn instance_ip_add_opte(
 pub async fn instance_ip_add_opte(
     sagactx: &NexusActionContext,
     authz_instance: &authz::Instance,
-    sled_uuid: Option<Uuid>,
+    sled_uuid: Option<SledUuid>,
     target_ip: ModifyStateForExternalIp,
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
@@ -476,7 +477,10 @@ pub async fn instance_ip_add_opte(
             "sled agent client went away mid-attach/detach",
         ))
     })?
-    .instance_put_external_ip(&authz_instance.id(), &sled_agent_body)
+    .instance_put_external_ip(
+        &InstanceUuid::from_untyped_uuid(authz_instance.id()),
+        &sled_agent_body,
+    )
     .await
     .map_err(|e| {
         ActionError::action_failed(match e {
@@ -500,7 +504,7 @@ pub async fn instance_ip_remove_opte(
 pub async fn instance_ip_remove_opte(
     sagactx: &NexusActionContext,
     authz_instance: &authz::Instance,
-    sled_uuid: Option<Uuid>,
+    sled_uuid: Option<SledUuid>,
     target_ip: ModifyStateForExternalIp,
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
@@ -531,7 +535,10 @@ pub async fn instance_ip_remove_opte(
             "sled agent client went away mid-attach/detach",
         ))
     })?
-    .instance_delete_external_ip(&authz_instance.id(), &sled_agent_body)
+    .instance_delete_external_ip(
+        &InstanceUuid::from_untyped_uuid(authz_instance.id()),
+        &sled_agent_body,
+    )
     .await
     .map_err(|e| {
         ActionError::action_failed(match e {
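Saga parameters and node outputs are serialized into the saga log, so changing
fields like instance_id from Uuid to InstanceUuid is only transparent if the
typed wrapper serializes exactly like the bare UUID. That is the working
assumption behind these hunks; a sketch that checks it, assuming serde_json is
available:

    use omicron_uuid_kinds::{GenericUuid, InstanceUuid};

    fn main() -> Result<(), serde_json::Error> {
        let id = InstanceUuid::new_v4();

        // If the typed and untyped forms serialize identically, saga nodes
        // can adopt typed IDs without changing what gets persisted.
        let typed = serde_json::to_string(&id)?;
        let untyped = serde_json::to_string(&id.into_untyped_uuid())?;
        assert_eq!(typed, untyped);
        Ok(())
    }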
diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs
index f336a01f0c..ffbd5ff2f5 100644
--- a/nexus/src/app/sagas/instance_create.rs
+++ b/nexus/src/app/sagas/instance_create.rs
@@ -22,6 +22,7 @@ use omicron_common::api::external::Name;
 use omicron_common::api::external::NameOrId;
 use omicron_common::api::external::{Error, InternalContext};
 use omicron_common::api::internal::shared::SwitchLocation;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
 use ref_cast::RefCast;
 use serde::Deserialize;
 use serde::Serialize;
@@ -51,14 +52,14 @@ pub(crate) struct Params {
 struct NetParams {
     saga_params: Params,
     which: usize,
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
     new_id: Uuid,
 }

 #[derive(Debug, Deserialize, Serialize)]
 struct NetworkConfigParams {
     saga_params: Params,
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
     which: usize,
     switch_location: SwitchLocation,
 }
@@ -67,7 +68,7 @@ struct NetworkConfigParams {
 struct DiskAttachParams {
     serialized_authn: authn::saga::Serialized,
     project_id: Uuid,
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
     attach_params: InstanceDiskAttachment,
 }

@@ -122,7 +123,7 @@ impl NexusSaga for SagaInstanceCreate {
     ) -> Result<steno::Dag, SagaInitError> {
         // Pre-create the instance ID so that it can be supplied as a constant
         // parameter to the subsagas that create and attach devices.
-        let instance_id = Uuid::new_v4();
+        let instance_id = InstanceUuid::new_v4();

         builder.append(Node::constant(
             "instance_id",
@@ -307,7 +308,7 @@ async fn sic_associate_ssh_keys(
         &sagactx,
         &saga_params.serialized_authn,
     );
-    let instance_id = sagactx.lookup::<Uuid>("instance_id")?;
+    let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?;

     // Gather the SSH public keys of the actor making the request so
     // that they may be injected into the new image via cloud-init.
@@ -359,7 +360,7 @@ async fn sic_associate_ssh_keys_undo(
         &sagactx,
         &saga_params.serialized_authn,
     );
-    let instance_id = sagactx.lookup::<Uuid>("instance_id")?;
+    let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?;
     datastore
         .instance_ssh_keys_delete(&opctx, instance_id)
         .await
@@ -423,7 +424,7 @@ async fn sic_create_network_interface_undo(
     );
     let interface_id = repeat_saga_params.new_id;
     let (.., authz_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .lookup_for(authz::Action::Modify)
         .await
         .map_err(ActionError::action_failed)?;
@@ -469,7 +470,7 @@ async fn sic_create_network_interface_undo(
 async fn create_custom_network_interface(
     sagactx: &NexusActionContext,
     saga_params: &Params,
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
     interface_id: Uuid,
     interface_params: &params::InstanceNetworkInterfaceCreate,
 ) -> Result<(), ActionError> {
@@ -482,7 +483,7 @@ async fn create_custom_network_interface(
     // Lookup authz objects, used in the call to create the NIC itself.
     let (.., authz_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .lookup_for(authz::Action::CreateChild)
         .await
         .map_err(ActionError::action_failed)?;
@@ -544,7 +545,7 @@ async fn create_default_primary_network_interface(
 async fn create_default_primary_network_interface(
     sagactx: &NexusActionContext,
     saga_params: &Params,
     nic_index: usize,
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
     interface_id: Uuid,
 ) -> Result<(), ActionError> {
     // We're statically creating up to MAX_NICS_PER_INSTANCE saga nodes, but
@@ -588,7 +589,7 @@ async fn create_default_primary_network_interface(
     // Lookup authz objects, used in the call to actually create the NIC.
     let (.., authz_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .lookup_for(authz::Action::CreateChild)
         .await
         .map_err(ActionError::action_failed)?;
@@ -643,7 +644,7 @@ async fn sic_allocate_instance_snat_ip(
         &sagactx,
         &saga_params.serialized_authn,
     );
-    let instance_id = sagactx.lookup::<Uuid>("instance_id")?;
+    let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?;
     let ip_id = sagactx.lookup::<Uuid>("snat_ip_id")?;

     let (.., pool) = datastore
@@ -886,7 +887,7 @@ async fn ensure_instance_disk_attach_state(
     let (.., authz_instance, _db_instance) =
         LookupPath::new(&opctx, &datastore)
-            .instance_id(instance_id)
+            .instance_id(instance_id.into_untyped_uuid())
             .fetch()
             .await
             .map_err(ActionError::action_failed)?;
@@ -929,7 +930,7 @@ async fn sic_create_instance_record(
         &sagactx,
         &params.serialized_authn,
     );
-    let instance_id = sagactx.lookup::<Uuid>("instance_id")?;
+    let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?;

     let new_instance = db::model::Instance::new(
         instance_id,
@@ -962,7 +963,7 @@ async fn sic_delete_instance_record(
         &sagactx,
         &params.serialized_authn,
     );
-    let instance_id = sagactx.lookup::<Uuid>("instance_id")?;
+    let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?;
     let instance_name = sagactx
         .lookup::<db::model::Instance>("instance_record")?
         .name()
@@ -1024,7 +1025,7 @@ async fn sic_move_to_stopped(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
-    let instance_id = sagactx.lookup::<Uuid>("instance_id")?;
+    let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?;
     let instance_record =
         sagactx.lookup::<db::model::Instance>("instance_record")?;
diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs
index f8edf37dc4..c4e209dccd 100644
--- a/nexus/src/app/sagas/instance_ip_attach.rs
+++ b/nexus/src/app/sagas/instance_ip_attach.rs
@@ -13,6 +13,7 @@ use crate::app::{authn, authz};
 use nexus_db_model::{IpAttachState, Ipv4NatEntry};
 use nexus_types::external_api::views;
 use omicron_common::api::external::Error;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, SledUuid};
 use serde::Deserialize;
 use serde::Serialize;
 use steno::ActionError;
@@ -89,6 +90,8 @@ async fn siia_begin_attach_ip(
         &params.serialized_authn,
     );

+    let instance_id =
+        InstanceUuid::from_untyped_uuid(params.authz_instance.id());
     match &params.create_params {
         // Allocate a new IP address from the target, possibly default, pool
         ExternalIpAttach::Ephemeral { pool } => {
@@ -111,7 +114,7 @@ async fn siia_begin_attach_ip(
                 .allocate_instance_ephemeral_ip(
                     &opctx,
                     Uuid::new_v4(),
-                    params.authz_instance.id(),
+                    instance_id,
                     pool,
                     false,
                 )
@@ -124,12 +127,7 @@ async fn siia_begin_attach_ip(
         }
         // Set the parent of an existing floating IP to the new instance's ID.
         ExternalIpAttach::Floating { floating_ip } => datastore
-            .floating_ip_begin_attach(
-                &opctx,
-                &floating_ip,
-                params.authz_instance.id(),
-                false,
-            )
+            .floating_ip_begin_attach(&opctx, &floating_ip, instance_id, false)
             .await
             .map_err(ActionError::action_failed)
             .map(|(external_ip, do_saga)| ModifyStateForExternalIp {
@@ -163,7 +161,7 @@ async fn siia_begin_attach_ip_undo(
 async fn siia_get_instance_state(
     sagactx: NexusActionContext,
-) -> Result<Option<Uuid>, ActionError> {
+) -> Result<Option<SledUuid>, ActionError> {
     let params = sagactx.saga_params::<Params>()?;
     instance_ip_get_instance_state(
         &sagactx,
@@ -179,7 +177,7 @@ async fn siia_nat(
     sagactx: NexusActionContext,
 ) -> Result<Option<Ipv4NatEntry>, ActionError> {
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     instance_ip_add_nat(
         &sagactx,
@@ -248,7 +246,7 @@ async fn siia_update_opte(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     instance_ip_add_opte(&sagactx, &params.authz_instance, sled_id, target_ip)
         .await
@@ -259,7 +257,7 @@ async fn siia_update_opte_undo(
 ) -> Result<(), anyhow::Error> {
     let log = sagactx.user_data().log();
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     if let Err(e) = instance_ip_remove_opte(
         &sagactx,
@@ -420,9 +418,10 @@ pub(crate) mod test {
         let instance =
             create_instance(client, PROJECT_NAME, INSTANCE_NAME).await;

+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &instance_id,
         )
         .await;

@@ -434,11 +433,9 @@ pub(crate) mod test {
             nexus.run_saga(saga).await.expect("Attach saga should succeed");
         }

-        let instance_id = instance.id();
-
         // Sled agent has a record of the new external IPs.
         let mut eips = sled_agent.external_ips.lock().await;
-        let my_eips = eips.entry(instance_id).or_default();
+        let my_eips = eips.entry(instance_id.into_untyped_uuid()).or_default();
         assert!(my_eips.iter().any(|v| matches!(
             v,
             omicron_sled_agent::params::InstanceExternalIpBody::Floating(_)
@@ -517,7 +514,7 @@ pub(crate) mod test {
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &InstanceUuid::from_untyped_uuid(instance.identity.id),
         )
         .await;
@@ -549,7 +546,7 @@ pub(crate) mod test {
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &InstanceUuid::from_untyped_uuid(instance.identity.id),
         )
         .await;
@@ -584,7 +581,7 @@ pub(crate) mod test {
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &InstanceUuid::from_untyped_uuid(instance.identity.id),
         )
         .await;
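The attach saga threads the instance's location through its nodes as
Option<SledUuid>: None means the instance is not resident on any sled, and the
NAT/OPTE steps turn into no-ops. A small sketch of that convention (the helper
below is hypothetical, not part of the saga):

    use omicron_uuid_kinds::{GenericUuid, SledUuid};
    use uuid::Uuid;

    // None = not running anywhere; Some(sled) = push config to that sled.
    // Lookup paths that still key on plain Uuids get the converted form.
    fn nat_target(sled: Option<SledUuid>) -> Option<Uuid> {
        sled.map(|s| s.into_untyped_uuid())
    }

    fn main() {
        assert_eq!(nat_target(None), None);
        let sled = SledUuid::new_v4();
        assert_eq!(nat_target(Some(sled)), Some(sled.into_untyped_uuid()));
    }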
diff --git a/nexus/src/app/sagas/instance_ip_detach.rs b/nexus/src/app/sagas/instance_ip_detach.rs
index 9625d77bf9..474cfb18a6 100644
--- a/nexus/src/app/sagas/instance_ip_detach.rs
+++ b/nexus/src/app/sagas/instance_ip_detach.rs
@@ -15,6 +15,7 @@ use nexus_db_model::IpAttachState;
 use nexus_db_queries::db::lookup::LookupPath;
 use nexus_types::external_api::views;
 use omicron_common::api::external::NameOrId;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, SledUuid};
 use ref_cast::RefCast;
 use serde::Deserialize;
 use serde::Serialize;
@@ -71,13 +72,12 @@ async fn siid_begin_detach_ip(
         &params.serialized_authn,
     );

+    let instance_id =
+        InstanceUuid::from_untyped_uuid(params.authz_instance.id());
     match &params.delete_params {
         params::ExternalIpDetach::Ephemeral => {
             let eip = datastore
-                .instance_lookup_ephemeral_ip(
-                    &opctx,
-                    params.authz_instance.id(),
-                )
+                .instance_lookup_ephemeral_ip(&opctx, instance_id)
                 .await
                 .map_err(ActionError::action_failed)?;
@@ -86,7 +86,7 @@ async fn siid_begin_detach_ip(
                 .begin_deallocate_ephemeral_ip(
                     &opctx,
                     eph_ip.id,
-                    params.authz_instance.id(),
+                    instance_id,
                 )
                 .await
                 .map_err(ActionError::action_failed)
@@ -118,7 +118,7 @@ async fn siid_begin_detach_ip(
                 .floating_ip_begin_detach(
                     &opctx,
                     &authz_fip,
-                    params.authz_instance.id(),
+                    instance_id,
                     false,
                 )
                 .await
@@ -155,7 +155,7 @@ async fn siid_begin_detach_ip_undo(
 async fn siid_get_instance_state(
     sagactx: NexusActionContext,
-) -> Result<Option<Uuid>, ActionError> {
+) -> Result<Option<SledUuid>, ActionError> {
     let params = sagactx.saga_params::<Params>()?;
     instance_ip_get_instance_state(
         &sagactx,
@@ -168,7 +168,7 @@ async fn siid_get_instance_state(

 async fn siid_nat(sagactx: NexusActionContext) -> Result<(), ActionError> {
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     instance_ip_remove_nat(
         &sagactx,
@@ -184,7 +184,7 @@ async fn siid_nat_undo(
 ) -> Result<(), anyhow::Error> {
     let log = sagactx.user_data().log();
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     if let Err(e) = instance_ip_add_nat(
         &sagactx,
@@ -205,7 +205,7 @@ async fn siid_update_opte(
     sagactx: NexusActionContext,
 ) -> Result<(), ActionError> {
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     instance_ip_remove_opte(
         &sagactx,
@@ -221,7 +221,7 @@ async fn siid_update_opte_undo(
 ) -> Result<(), anyhow::Error> {
     let log = sagactx.user_data().log();
     let params = sagactx.saga_params::<Params>()?;
-    let sled_id = sagactx.lookup::<Option<Uuid>>("instance_state")?;
+    let sled_id = sagactx.lookup::<Option<SledUuid>>("instance_state")?;
     let target_ip = sagactx.lookup::<ModifyStateForExternalIp>("target_ip")?;
     if let Err(e) = instance_ip_add_opte(
         &sagactx,
@@ -390,10 +390,11 @@ pub(crate) mod test {
         let _ = ip_manip_test_setup(&client).await;
         let instance =
             create_instance(client, PROJECT_NAME, INSTANCE_NAME).await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.id());

         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &instance_id,
         )
         .await;
@@ -407,11 +408,9 @@ pub(crate) mod test {
             nexus.run_saga(saga).await.expect("Detach saga should succeed");
         }

-        let instance_id = instance.id();
-
         // Sled agent has removed its records of the external IPs.
         let mut eips = sled_agent.external_ips.lock().await;
-        let my_eips = eips.entry(instance_id).or_default();
+        let my_eips = eips.entry(instance_id.into_untyped_uuid()).or_default();
         assert!(my_eips.is_empty());

         // DB only has record for SNAT.
@@ -460,7 +459,10 @@ pub(crate) mod test {
         // Instance still has one Ephemeral IP, and one Floating IP.
         let db_eips = datastore
-            .instance_lookup_external_ips(&opctx, instance_id)
+            .instance_lookup_external_ips(
+                &opctx,
+                InstanceUuid::from_untyped_uuid(instance_id),
+            )
             .await
             .unwrap();
         assert_eq!(db_eips.len(), 3);
@@ -492,7 +494,7 @@ pub(crate) mod test {
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &InstanceUuid::from_untyped_uuid(instance.identity.id),
         )
         .await;
@@ -526,7 +528,7 @@ pub(crate) mod test {
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &InstanceUuid::from_untyped_uuid(instance.identity.id),
         )
         .await;
@@ -563,7 +565,7 @@ pub(crate) mod test {
         crate::app::sagas::test_helpers::instance_simulate(
             cptestctx,
-            &instance.identity.id,
+            &InstanceUuid::from_untyped_uuid(instance.identity.id),
         )
         .await;
diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs
index cbcad41a4f..3546642bbb 100644
--- a/nexus/src/app/sagas/instance_migrate.rs
+++ b/nexus/src/app/sagas/instance_migrate.rs
@@ -13,6 +13,7 @@ use crate::app::sagas::{
 use crate::external_api::params;
 use nexus_db_queries::db::{identity::Resource, lookup::LookupPath};
 use nexus_db_queries::{authn, authz, db};
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, SledUuid};
 use serde::Deserialize;
 use serde::Serialize;
 use sled_agent_client::types::{
@@ -41,6 +42,10 @@ pub struct Params {

 declare_saga_actions!
 {
     instance_migrate;

+    GENERATE_PROPOLIS_ID -> "dst_propolis_id" {
+        + sim_generate_propolis_id
+    }
+
     // In order to set up migration, the saga needs to construct the following:
     //
     // - A migration ID and destination Propolis ID (added to the DAG inline as
@@ -125,12 +130,7 @@ impl NexusSaga for SagaInstanceMigrate {
             ACTION_GENERATE_ID.as_ref(),
         ));

-        builder.append(Node::action(
-            "dst_propolis_id",
-            "GeneratePropolisId",
-            ACTION_GENERATE_ID.as_ref(),
-        ));
-
+        builder.append(generate_propolis_id_action());
         builder.append(reserve_resources_action());
         builder.append(allocate_propolis_ip_action());
         builder.append(create_vmm_record_action());
@@ -143,13 +143,19 @@ impl NexusSaga for SagaInstanceMigrate {
     }
 }

+async fn sim_generate_propolis_id(
+    _sagactx: NexusActionContext,
+) -> Result<PropolisUuid, ActionError> {
+    Ok(PropolisUuid::new_v4())
+}
+
 /// Reserves resources for the destination on the specified target sled.
 async fn sim_reserve_sled_resources(
     sagactx: NexusActionContext,
-) -> Result<Uuid, ActionError> {
+) -> Result<SledUuid, ActionError> {
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
-    let propolis_id = sagactx.lookup::<Uuid>("dst_propolis_id")?;
+    let propolis_id = sagactx.lookup::<PropolisUuid>("dst_propolis_id")?;

     // Add a constraint that requires the allocator to reserve on the
     // migration's destination sled instead of a random sled.
@@ -166,16 +172,19 @@ async fn sim_reserve_sled_resources(
     )
     .await?;

-    Ok(resource.sled_id)
+    Ok(SledUuid::from_untyped_uuid(resource.sled_id))
 }

 async fn sim_release_sled_resources(
     sagactx: NexusActionContext,
 ) -> Result<(), anyhow::Error> {
     let osagactx = sagactx.user_data();
-    let propolis_id = sagactx.lookup::<Uuid>("dst_propolis_id")?;
+    let propolis_id = sagactx.lookup::<PropolisUuid>("dst_propolis_id")?;

-    osagactx.nexus().delete_sled_reservation(propolis_id).await?;
+    osagactx
+        .nexus()
+        .delete_sled_reservation(propolis_id.into_untyped_uuid())
+        .await?;
     Ok(())
 }
@@ -191,7 +200,7 @@ async fn sim_allocate_propolis_ip(
     allocate_vmm_ipv6(
         &opctx,
         sagactx.user_data().datastore(),
-        params.migrate_params.dst_sled_id,
+        SledUuid::from_untyped_uuid(params.migrate_params.dst_sled_id),
     )
     .await
 }
@@ -258,8 +267,8 @@ async fn sim_create_vmm_record(
     );

     let instance_id = params.instance.id();
-    let propolis_id = sagactx.lookup::<Uuid>("dst_propolis_id")?;
-    let sled_id = sagactx.lookup::<Uuid>("dst_sled_id")?;
+    let propolis_id = sagactx.lookup::<PropolisUuid>("dst_propolis_id")?;
+    let sled_id = sagactx.lookup::<SledUuid>("dst_sled_id")?;
     let propolis_ip = sagactx.lookup::<Ipv6Addr>("dst_propolis_ip")?;

     info!(osagactx.log(), "creating vmm record for migration destination";
@@ -270,7 +279,7 @@ async fn sim_create_vmm_record(
     super::instance_common::create_and_insert_vmm_record(
         osagactx.datastore(),
         &opctx,
-        instance_id,
+        InstanceUuid::from_untyped_uuid(instance_id),
         propolis_id,
         sled_id,
         propolis_ip,
@@ -312,9 +321,10 @@ async fn sim_set_migration_ids(
     );

     let db_instance = &params.instance;
-    let src_sled_id = params.src_vmm.sled_id;
+    let instance_id = InstanceUuid::from_untyped_uuid(db_instance.id());
+    let src_sled_id = SledUuid::from_untyped_uuid(params.src_vmm.sled_id);
     let migration_id = sagactx.lookup::<Uuid>("migrate_id")?;
-    let dst_propolis_id = sagactx.lookup::<Uuid>("dst_propolis_id")?;
+    let dst_propolis_id = sagactx.lookup::<PropolisUuid>("dst_propolis_id")?;

     info!(osagactx.log(), "setting migration IDs on migration source sled";
         "instance_id" => %db_instance.id(),
@@ -327,7 +337,7 @@ async fn sim_set_migration_ids(
         .nexus()
         .instance_set_migration_ids(
             &opctx,
-            db_instance.id(),
+            instance_id,
             src_sled_id,
             db_instance.runtime(),
             InstanceMigrationSourceParams { dst_propolis_id, migration_id },
@@ -343,9 +353,10 @@ async fn sim_clear_migration_ids(
 ) -> Result<(), anyhow::Error> {
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
-    let src_sled_id = params.src_vmm.sled_id;
+    let src_sled_id = SledUuid::from_untyped_uuid(params.src_vmm.sled_id);
     let db_instance =
         sagactx.lookup::<db::model::Instance>("set_migration_ids")?;
+    let instance_id = InstanceUuid::from_untyped_uuid(db_instance.id());

     info!(osagactx.log(), "clearing migration IDs for saga unwind";
         "instance_id" => %db_instance.id(),
@@ -367,7 +378,7 @@ async fn sim_clear_migration_ids(
     if let Err(e) = osagactx
         .nexus()
         .instance_clear_migration_ids(
-            db_instance.id(),
+            instance_id,
             src_sled_id,
             db_instance.runtime(),
         )
@@ -375,7 +386,7 @@ async fn sim_clear_migration_ids(
     {
         warn!(osagactx.log(),
               "Error clearing migration IDs during rollback";
-              "instance_id" => %db_instance.id(),
+              "instance_id" => %instance_id,
               "error" => ?e);
     }

@@ -407,17 +418,19 @@ async fn sim_ensure_destination_propolis(
         .await
         .map_err(ActionError::action_failed)?;

+    let src_propolis_id = PropolisUuid::from_untyped_uuid(params.src_vmm.id);
+    let dst_propolis_id = PropolisUuid::from_untyped_uuid(vmm.id);
     osagactx
         .nexus()
         .instance_ensure_registered(
             &opctx,
             &authz_instance,
             &db_instance,
-            &vmm.id,
+            &dst_propolis_id,
             &vmm,
             InstanceRegisterReason::Migrate {
-                vmm_id: params.src_vmm.id,
-                target_vmm_id: vmm.id,
+                vmm_id: src_propolis_id,
+                target_vmm_id: dst_propolis_id,
             },
         )
         .await
@@ -436,7 +449,7 @@ async fn sim_ensure_destination_propolis_undo(
         &params.serialized_authn,
     );

-    let dst_sled_id = sagactx.lookup::<Uuid>("dst_sled_id")?;
+    let dst_sled_id = sagactx.lookup::<SledUuid>("dst_sled_id")?;
     let db_instance =
         sagactx.lookup::<db::model::Instance>("set_migration_ids")?;

     let (.., authz_instance) = LookupPath::new(&opctx, &osagactx.datastore())
@@ -592,10 +605,10 @@ mod tests {
     async fn add_sleds(
         cptestctx: &ControlPlaneTestContext,
         num_sleds: usize,
-    ) -> Vec<(Uuid, Server)> {
+    ) -> Vec<(SledUuid, Server)> {
         let mut sas = Vec::with_capacity(num_sleds);
         for _ in 0..num_sleds {
-            let sa_id = Uuid::new_v4();
+            let sa_id = SledUuid::new_v4();
             let log =
                 cptestctx.logctx.log.new(o!("sled_id" => sa_id.to_string()));
             let addr =
@@ -647,10 +660,10 @@ mod tests {
     fn select_first_alternate_sled(
         db_vmm: &db::model::Vmm,
-        other_sleds: &[(Uuid, Server)],
-    ) -> Uuid {
-        let default_sled_uuid =
-            Uuid::parse_str(nexus_test_utils::SLED_AGENT_UUID).unwrap();
+        other_sleds: &[(SledUuid, Server)],
+    ) -> SledUuid {
+        let default_sled_uuid: SledUuid =
+            nexus_test_utils::SLED_AGENT_UUID.parse().unwrap();
         if other_sleds.is_empty() {
             panic!("need at least one other sled");
         }
@@ -659,7 +672,7 @@ mod tests {
             panic!("default test sled agent was in other_sleds");
         }

-        if db_vmm.sled_id == default_sled_uuid {
+        if db_vmm.sled_id == default_sled_uuid.into_untyped_uuid() {
             other_sleds[0].0
         } else {
             default_sled_uuid
@@ -677,19 +690,21 @@ mod tests {
         let opctx = test_helpers::test_opctx(cptestctx);
         let instance = create_instance(client).await;
+        let instance_id =
+            InstanceUuid::from_untyped_uuid(instance.identity.id);

         // Poke the instance to get it into the Running state.
-        test_helpers::instance_simulate(cptestctx, &instance.identity.id).await;
+        test_helpers::instance_simulate(cptestctx, &instance_id).await;

-        let state =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id).await;
+        let state = test_helpers::instance_fetch(cptestctx, instance_id).await;
         let vmm = state.vmm().as_ref().unwrap();
         let dst_sled_id = select_first_alternate_sled(vmm, &other_sleds);
         let params = Params {
             serialized_authn: authn::saga::Serialized::for_opctx(&opctx),
             instance: state.instance().clone(),
             src_vmm: vmm.clone(),
-            migrate_params: params::InstanceMigrate { dst_sled_id },
+            migrate_params: params::InstanceMigrate {
+                dst_sled_id: dst_sled_id.into_untyped_uuid(),
+            },
         };

         let dag = create_saga_dag::<SagaInstanceMigrate>(params).unwrap();
@@ -700,8 +715,7 @@ mod tests {
         // steps in the simulated agents) should not change where the instance
         // is running.
         let new_state =
-            test_helpers::instance_fetch(cptestctx, state.instance().id())
-                .await;
+            test_helpers::instance_fetch(cptestctx, instance_id).await;

         assert_eq!(
             new_state.instance().runtime().propolis_id,
@@ -721,18 +735,17 @@ mod tests {
         let opctx = test_helpers::test_opctx(cptestctx);
         let instance = create_instance(client).await;
+        let instance_id =
+            InstanceUuid::from_untyped_uuid(instance.identity.id);

         // Poke the instance to get it into the Running state.
-        test_helpers::instance_simulate(cptestctx, &instance.identity.id).await;
+        test_helpers::instance_simulate(cptestctx, &instance_id).await;

         let make_params = || -> futures::future::BoxFuture<'_, Params> {
             Box::pin({
                 async {
-                    let old_state = test_helpers::instance_fetch(
-                        cptestctx,
-                        instance.identity.id,
-                    )
-                    .await;
+                    let old_state =
+                        test_helpers::instance_fetch(cptestctx, instance_id)
+                            .await;

                     let old_instance = old_state.instance();
                     let old_vmm = old_state
@@ -754,7 +767,9 @@ mod tests {
                         ),
                         instance: old_instance.clone(),
                         src_vmm: old_vmm.clone(),
-                        migrate_params: params::InstanceMigrate { dst_sled_id },
+                        migrate_params: params::InstanceMigrate {
+                            dst_sled_id: dst_sled_id.into_untyped_uuid(),
+                        },
                     }
                 }
             })
@@ -766,11 +781,9 @@ mod tests {
                     // Unwinding at any step should clear the migration IDs from
                     // the instance record and leave the instance's location
                     // otherwise untouched.
-                    let new_state = test_helpers::instance_fetch(
-                        cptestctx,
-                        instance.identity.id,
-                    )
-                    .await;
+                    let new_state =
+                        test_helpers::instance_fetch(cptestctx, instance_id)
+                            .await;

                     let new_instance = new_state.instance();
                     let new_vmm =
@@ -793,22 +806,13 @@ mod tests {
                     // destroying the migration destination (if one was ensured)
                     // doesn't advance the Propolis ID generation in a way that
                    // prevents the source from issuing further state updates.
-                    test_helpers::instance_stop(
-                        cptestctx,
-                        &instance.identity.id,
-                    )
-                    .await;
-                    test_helpers::instance_simulate(
-                        cptestctx,
-                        &instance.identity.id,
-                    )
-                    .await;
-
-                    let new_state = test_helpers::instance_fetch(
-                        cptestctx,
-                        instance.identity.id,
-                    )
-                    .await;
+                    test_helpers::instance_stop(cptestctx, &instance_id).await;
+                    test_helpers::instance_simulate(cptestctx, &instance_id)
+                        .await;
+
+                    let new_state =
+                        test_helpers::instance_fetch(cptestctx, instance_id)
+                            .await;

                     let new_instance = new_state.instance();
                     let new_vmm = new_state.vmm().as_ref();
@@ -825,16 +829,9 @@ mod tests {
                         "migration saga unwind: restarting instance after \
                          failed saga"
                     );
-                    test_helpers::instance_start(
-                        cptestctx,
-                        &instance.identity.id,
-                    )
-                    .await;
-                    test_helpers::instance_simulate(
-                        cptestctx,
-                        &instance.identity.id,
-                    )
-                    .await;
+                    test_helpers::instance_start(cptestctx, &instance_id).await;
+                    test_helpers::instance_simulate(cptestctx, &instance_id)
+                        .await;
                 }
             })
         };
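Both the migrate and start sagas drop the shared ACTION_GENERATE_ID node in
favor of a saga-local action whose output is already typed, so downstream
nodes can look up a PropolisUuid rather than a bare Uuid and convert only at
the edges. The action itself is trivial; a standalone sketch of the idea, with
steno's registration machinery elided:

    use omicron_uuid_kinds::PropolisUuid;

    // What the generated saga node does: mint a fresh, already-typed VMM ID.
    // It serializes into the saga log exactly like a plain v4 UUID.
    fn generate_propolis_id() -> PropolisUuid {
        PropolisUuid::new_v4()
    }

    fn main() {
        let dst_propolis_id = generate_propolis_id();
        println!("dst_propolis_id = {dst_propolis_id}");
    }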
diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs
index 0013a63d1a..9730025099 100644
--- a/nexus/src/app/sagas/instance_start.rs
+++ b/nexus/src/app/sagas/instance_start.rs
@@ -8,7 +8,7 @@ use std::net::Ipv6Addr;

 use super::{
     instance_common::allocate_vmm_ipv6, NexusActionContext, NexusSaga,
-    SagaInitError, ACTION_GENERATE_ID,
+    SagaInitError,
 };
 use crate::app::instance::InstanceRegisterReason;
 use crate::app::instance::InstanceStateChangeError;
@@ -17,10 +17,10 @@ use chrono::Utc;
 use nexus_db_queries::db::{identity::Resource, lookup::LookupPath};
 use nexus_db_queries::{authn, authz, db};
 use omicron_common::api::external::Error;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, SledUuid};
 use serde::{Deserialize, Serialize};
 use slog::info;
-use steno::{ActionError, Node};
-use uuid::Uuid;
+use steno::ActionError;

 /// Parameters to the instance start saga.
 #[derive(Debug, Deserialize, Serialize)]
@@ -35,6 +35,10 @@ pub(crate) struct Params {

 declare_saga_actions!
 {
     instance_start;

+    GENERATE_PROPOLIS_ID -> "propolis_id" {
+        + sis_generate_propolis_id
+    }
+
     ALLOC_SERVER -> "sled_id" {
         + sis_alloc_server
         - sis_alloc_server_undo
@@ -101,12 +105,7 @@ impl NexusSaga for SagaInstanceStart {
         _params: &Self::Params,
         mut builder: steno::DagBuilder,
     ) -> Result<steno::Dag, SagaInitError> {
-        builder.append(Node::action(
-            "propolis_id",
-            "GeneratePropolisId",
-            ACTION_GENERATE_ID.as_ref(),
-        ));
-
+        builder.append(generate_propolis_id_action());
         builder.append(alloc_server_action());
         builder.append(alloc_propolis_ip_action());
         builder.append(create_vmm_record_action());
@@ -120,14 +119,20 @@ impl NexusSaga for SagaInstanceStart {
     }
 }

+async fn sis_generate_propolis_id(
+    _sagactx: NexusActionContext,
+) -> Result<PropolisUuid, ActionError> {
+    Ok(PropolisUuid::new_v4())
+}
+
 async fn sis_alloc_server(
     sagactx: NexusActionContext,
-) -> Result<Uuid, ActionError> {
+) -> Result<SledUuid, ActionError> {
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
     let hardware_threads = params.db_instance.ncpus.0;
     let reservoir_ram = params.db_instance.memory;
-    let propolis_id = sagactx.lookup::<Uuid>("propolis_id")?;
+    let propolis_id = sagactx.lookup::<PropolisUuid>("propolis_id")?;

     let resource = super::instance_common::reserve_vmm_resources(
         osagactx.nexus(),
@@ -138,16 +143,19 @@ async fn sis_alloc_server(
     )
     .await?;

-    Ok(resource.sled_id)
+    Ok(SledUuid::from_untyped_uuid(resource.sled_id))
 }

 async fn sis_alloc_server_undo(
     sagactx: NexusActionContext,
 ) -> Result<(), anyhow::Error> {
     let osagactx = sagactx.user_data();
-    let propolis_id = sagactx.lookup::<Uuid>("propolis_id")?;
+    let propolis_id = sagactx.lookup::<PropolisUuid>("propolis_id")?;

-    osagactx.nexus().delete_sled_reservation(propolis_id).await?;
+    osagactx
+        .nexus()
+        .delete_sled_reservation(propolis_id.into_untyped_uuid())
+        .await?;
     Ok(())
 }

@@ -159,7 +167,7 @@ async fn sis_alloc_propolis_ip(
         &sagactx,
         &params.serialized_authn,
     );
-    let sled_uuid = sagactx.lookup::<Uuid>("sled_id")?;
+    let sled_uuid = sagactx.lookup::<SledUuid>("sled_id")?;
     allocate_vmm_ipv6(&opctx, sagactx.user_data().datastore(), sled_uuid).await
 }

@@ -172,9 +180,9 @@ async fn sis_create_vmm_record(
         &sagactx,
         &params.serialized_authn,
     );
-    let instance_id = params.db_instance.id();
-    let propolis_id = sagactx.lookup::<Uuid>("propolis_id")?;
-    let sled_id = sagactx.lookup::<Uuid>("sled_id")?;
+    let instance_id = InstanceUuid::from_untyped_uuid(params.db_instance.id());
+    let propolis_id = sagactx.lookup::<PropolisUuid>("propolis_id")?;
+    let sled_id = sagactx.lookup::<SledUuid>("sled_id")?;
     let propolis_ip = sagactx.lookup::<Ipv6Addr>("propolis_ip")?;

     super::instance_common::create_and_insert_vmm_record(
@@ -214,8 +222,8 @@ async fn sis_move_to_starting(
     let params = sagactx.saga_params::<Params>()?;
     let osagactx = sagactx.user_data();
     let datastore = osagactx.datastore();
-    let instance_id = params.db_instance.id();
-    let propolis_id = sagactx.lookup::<Uuid>("propolis_id")?;
+    let instance_id = InstanceUuid::from_untyped_uuid(params.db_instance.id());
+    let propolis_id = sagactx.lookup::<PropolisUuid>("propolis_id")?;
     info!(osagactx.log(), "moving instance to Starting state via saga";
           "instance_id" => %instance_id,
           "propolis_id" => %propolis_id);
@@ -228,7 +236,7 @@ async fn sis_move_to_starting(
     // For idempotency, refetch the instance to see if this step already applied
     // its desired update.
     let (.., db_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .fetch_for(authz::Action::Modify)
         .await
         .map_err(ActionError::action_failed)?;
@@ -237,7 +245,7 @@ async fn sis_move_to_starting(
         // If this saga's Propolis ID is already written to the record, then
         // this step must have completed already and is being retried, so
         // proceed.
-        Some(db_id) if db_id == propolis_id => {
+        Some(db_id) if db_id == propolis_id.into_untyped_uuid() => {
             info!(osagactx.log(), "start saga: Propolis ID already set";
                   "instance_id" => %instance_id);

@@ -261,7 +269,7 @@ async fn sis_move_to_starting(
         None => {
             let new_runtime = db::model::InstanceRuntimeState {
                 nexus_state: db::model::InstanceState::Vmm,
-                propolis_id: Some(propolis_id),
+                propolis_id: Some(propolis_id.into_untyped_uuid()),
                 time_updated: Utc::now(),
                 gen: db_instance.runtime().gen.next().into(),
                 ..db_instance.runtime_state
@@ -293,7 +301,7 @@ async fn sis_move_to_starting_undo(
     let osagactx = sagactx.user_data();
     let db_instance =
         sagactx.lookup::<db::model::Instance>("started_record")?;
-    let instance_id = db_instance.id();
+    let instance_id = InstanceUuid::from_untyped_uuid(db_instance.id());
     info!(osagactx.log(), "start saga failed; returning instance to Stopped";
           "instance_id" => %instance_id);

@@ -322,7 +330,7 @@ async fn sis_account_virtual_resources(
 ) -> Result<(), ActionError> {
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
-    let instance_id = params.db_instance.id();
+    let instance_id = InstanceUuid::from_untyped_uuid(params.db_instance.id());

     let opctx = crate::context::op_context_for_saga_action(
         &sagactx,
@@ -348,7 +356,7 @@ async fn sis_account_virtual_resources_undo(
 ) -> Result<(), anyhow::Error> {
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
-    let instance_id = params.db_instance.id();
+    let instance_id = InstanceUuid::from_untyped_uuid(params.db_instance.id());

     let opctx = crate::context::op_context_for_saga_action(
         &sagactx,
@@ -384,7 +392,7 @@ async fn sis_dpd_ensure(
     let osagactx = sagactx.user_data();
     let db_instance =
         sagactx.lookup::<db::model::Instance>("started_record")?;
-    let instance_id = db_instance.id();
+    let instance_id = InstanceUuid::from_untyped_uuid(db_instance.id());

     info!(osagactx.log(), "start saga: ensuring instance dpd configuration";
           "instance_id" => %instance_id);
@@ -397,9 +405,9 @@ async fn sis_dpd_ensure(
     // Querying sleds requires fleet access; use the instance allocator context
     // for this.
-    let sled_uuid = sagactx.lookup::<Uuid>("sled_id")?;
+    let sled_uuid = sagactx.lookup::<SledUuid>("sled_id")?;
     let (.., sled) = LookupPath::new(&osagactx.nexus().opctx_alloc, &datastore)
-        .sled_id(sled_uuid)
+        .sled_id(sled_uuid.into_untyped_uuid())
         .fetch()
         .await
         .map_err(ActionError::action_failed)?;
@@ -472,9 +480,9 @@ async fn sis_ensure_registered(
     let db_instance =
         sagactx.lookup::<db::model::Instance>("started_record")?;
     let instance_id = db_instance.id();
-    let sled_id = sagactx.lookup::<Uuid>("sled_id")?;
+    let sled_id = sagactx.lookup::<SledUuid>("sled_id")?;
     let vmm_record = sagactx.lookup::<db::model::Vmm>("vmm_record")?;
-    let propolis_id = sagactx.lookup::<Uuid>("propolis_id")?;
+    let propolis_id = sagactx.lookup::<PropolisUuid>("propolis_id")?;

     info!(osagactx.log(), "start saga: ensuring instance is registered on sled";
           "instance_id" => %instance_id,
@@ -508,8 +516,8 @@ async fn sis_ensure_registered_undo(
     let osagactx = sagactx.user_data();
     let params = sagactx.saga_params::<Params>()?;
     let datastore = osagactx.datastore();
-    let instance_id = params.db_instance.id();
-    let sled_id = sagactx.lookup::<Uuid>("sled_id")?;
+    let instance_id = InstanceUuid::from_untyped_uuid(params.db_instance.id());
+    let sled_id = sagactx.lookup::<SledUuid>("sled_id")?;
     let opctx = crate::context::op_context_for_saga_action(
         &sagactx,
         &params.serialized_authn,
@@ -522,7 +530,7 @@ async fn sis_ensure_registered_undo(
     // Fetch the latest record so that this callee can drive the instance into
     // a Failed state if the unregister call fails.
     let (.., authz_instance, db_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .fetch()
         .await
         .map_err(ActionError::action_failed)?;
@@ -624,14 +632,14 @@ async fn sis_ensure_running(
     let db_instance =
         sagactx.lookup::<db::model::Instance>("started_record")?;
     let db_vmm = sagactx.lookup::<db::model::Vmm>("vmm_record")?;
-    let instance_id = params.db_instance.id();
-    let sled_id = sagactx.lookup::<Uuid>("sled_id")?;
+    let instance_id = InstanceUuid::from_untyped_uuid(params.db_instance.id());
+    let sled_id = sagactx.lookup::<SledUuid>("sled_id")?;
     info!(osagactx.log(), "start saga: ensuring instance is running";
           "instance_id" => %instance_id,
           "sled_id" => %sled_id);

     let (.., authz_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .lookup_for(authz::Action::Modify)
         .await
         .map_err(ActionError::action_failed)?;
@@ -737,11 +745,11 @@ mod test {
         let _project_id = setup_test_project(&client).await;
         let opctx = test_helpers::test_opctx(cptestctx);
         let instance = create_instance(client).await;
-        let db_instance =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id)
-                .await
-                .instance()
-                .clone();
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id);
+        let db_instance = test_helpers::instance_fetch(cptestctx, instance_id)
+            .await
+            .instance()
+            .clone();

         let params = Params {
             serialized_authn: authn::saga::Serialized::for_opctx(&opctx),
@@ -752,15 +760,14 @@ mod test {
         let saga = nexus.create_runnable_saga(dag).await.unwrap();
         nexus.run_saga(saga).await.expect("Start saga should succeed");

-        test_helpers::instance_simulate(cptestctx, &instance.identity.id).await;
-        let vmm_state =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id)
-                .await
-                .vmm()
-                .as_ref()
-                .expect("running instance should have a vmm")
-                .runtime
-                .state;
+        test_helpers::instance_simulate(cptestctx, &instance_id).await;
+        let vmm_state = test_helpers::instance_fetch(cptestctx, instance_id)
+            .await
+            .vmm()
+            .as_ref()
+            .expect("running instance should have a vmm")
+            .runtime
+            .state;

         assert_eq!(vmm_state, nexus_db_model::VmmState::Running);
     }

@@ -775,6 +782,7 @@ mod test {
         let _project_id = setup_test_project(&client).await;
         let opctx = test_helpers::test_opctx(cptestctx);
         let instance = create_instance(client).await;
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id);

         test_helpers::action_failure_can_unwind::<
             SagaInstanceStart,
@@ -787,7 +795,7 @@ mod test {
                 async {
                     let db_instance = test_helpers::instance_fetch(
                         cptestctx,
-                        instance.identity.id,
+                        instance_id,
                     )
                     .await.instance().clone();

@@ -804,7 +812,7 @@ mod test {
                 async {
                     let new_db_instance = test_helpers::instance_fetch(
                         cptestctx,
-                        instance.identity.id,
+                        instance_id,
                     )
                     .await.instance().clone();

@@ -837,11 +845,11 @@ mod test {
         let _project_id = setup_test_project(&client).await;
         let opctx = test_helpers::test_opctx(cptestctx);
         let instance = create_instance(client).await;
-        let db_instance =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id)
-                .await
-                .instance()
-                .clone();
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id);
+        let db_instance = test_helpers::instance_fetch(cptestctx, instance_id)
+            .await
+            .instance()
+            .clone();

         let params = Params {
             serialized_authn: authn::saga::Serialized::for_opctx(&opctx),
@@ -850,15 +858,14 @@ mod test {
         let dag = create_saga_dag::<SagaInstanceStart>(params).unwrap();
         test_helpers::actions_succeed_idempotently(nexus, dag).await;

-        test_helpers::instance_simulate(cptestctx, &instance.identity.id).await;
-        let vmm_state =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id)
-                .await
-                .vmm()
-                .as_ref()
-                .expect("running instance should have a vmm")
-                .runtime
-                .state;
+        test_helpers::instance_simulate(cptestctx, &instance_id).await;
+        let vmm_state = test_helpers::instance_fetch(cptestctx, instance_id)
+            .await
+            .vmm()
+            .as_ref()
+            .expect("running instance should have a vmm")
+            .runtime
+            .state;

         assert_eq!(vmm_state, nexus_db_model::VmmState::Running);
     }

@@ -878,11 +885,11 @@ mod test {
         let _project_id = setup_test_project(&client).await;
         let opctx = test_helpers::test_opctx(cptestctx);
         let instance = create_instance(client).await;
-        let db_instance =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id)
-                .await
-                .instance()
-                .clone();
+        let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id);
+        let db_instance = test_helpers::instance_fetch(cptestctx, instance_id)
+            .await
+            .instance()
+            .clone();

         let params = Params {
             serialized_authn: authn::saga::Serialized::for_opctx(&opctx),
@@ -922,7 +929,7 @@ mod test {
         assert_eq!(saga_error.error_node_name, last_node_name);

         let db_instance =
-            test_helpers::instance_fetch(cptestctx, instance.identity.id).await;
+            test_helpers::instance_fetch(cptestctx, instance_id).await;

         assert_eq!(
             db_instance.instance().runtime_state.nexus_state,
diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs
index bacd0f1c9d..684b84dd07 100644
--- a/nexus/src/app/sagas/test_helpers.rs
+++ b/nexus/src/app/sagas/test_helpers.rs
@@ -22,11 +22,11 @@ use nexus_db_queries::{
 };
 use nexus_types::identity::Resource;
 use omicron_common::api::external::NameOrId;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
 use sled_agent_client::TestInterfaces as _;
 use slog::{info, warn, Logger};
 use std::{num::NonZeroU32, sync::Arc};
 use steno::SagaDag;
-use uuid::Uuid;

 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<crate::Server>;
@@ -40,14 +40,14 @@ pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext {

 pub(crate) async fn instance_start(
     cptestctx: &ControlPlaneTestContext,
-    id: &Uuid,
+    id: &InstanceUuid,
 ) {
     let nexus = &cptestctx.server.server_context().nexus;
     let opctx = test_opctx(&cptestctx);
     let instance_selector =
         nexus_types::external_api::params::InstanceSelector {
             project: None,
-            instance: NameOrId::from(*id),
+            instance: NameOrId::from(id.into_untyped_uuid()),
         };

     let instance_lookup =
@@ -60,14 +60,14 @@ pub(crate) async fn instance_start(

 pub(crate) async fn instance_stop(
     cptestctx: &ControlPlaneTestContext,
-    id: &Uuid,
+    id: &InstanceUuid,
 ) {
     let nexus = &cptestctx.server.server_context().nexus;
     let opctx = test_opctx(&cptestctx);
     let instance_selector =
         nexus_types::external_api::params::InstanceSelector {
             project: None,
-            instance: NameOrId::from(*id),
+            instance: NameOrId::from(id.into_untyped_uuid()),
         };

     let instance_lookup =
@@ -122,7 +122,7 @@ pub(crate) async fn instance_delete_by_name(

 pub(crate) async fn instance_simulate(
     cptestctx: &ControlPlaneTestContext,
-    instance_id: &Uuid,
+    instance_id: &InstanceUuid,
 ) {
     info!(&cptestctx.logctx.log, "Poking simulated instance";
           "instance_id" => %instance_id);
@@ -133,7 +133,7 @@ pub(crate) async fn instance_simulate(
         .unwrap()
         .expect("instance must be on a sled to simulate a state change");

-    sa.instance_finish_transition(*instance_id).await;
+    sa.instance_finish_transition(instance_id.into_untyped_uuid()).await;
 }

 pub(crate) async fn instance_simulate_by_name(
@@ -157,7 +157,7 @@ pub(crate) async fn instance_simulate_by_name(
         nexus.instance_lookup(&opctx, instance_selector).unwrap();
     let (.., instance) = instance_lookup.fetch().await.unwrap();
     let sa = nexus
-        .instance_sled_by_id(&instance.id())
+        .instance_sled_by_id(&InstanceUuid::from_untyped_uuid(instance.id()))
         .await
         .unwrap()
         .expect("instance must be on a sled to simulate a state change");
@@ -166,12 +166,12 @@ pub(crate) async fn instance_simulate_by_name(

 pub async fn instance_fetch(
     cptestctx: &ControlPlaneTestContext,
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
 ) -> InstanceAndActiveVmm {
     let datastore = cptestctx.server.server_context().nexus.datastore().clone();
     let opctx = test_opctx(&cptestctx);
     let (.., authz_instance) = LookupPath::new(&opctx, &datastore)
-        .instance_id(instance_id)
+        .instance_id(instance_id.into_untyped_uuid())
         .lookup_for(authz::Action::Read)
         .await
         .expect("test instance should be present in datastore");
diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs
index c7fc651823..fd5341ae80 100644
--- a/nexus/src/app/sled.rs
+++ b/nexus/src/app/sled.rs
@@ -20,6 +20,7 @@ use omicron_common::api::external::DataPageParams;
 use omicron_common::api::external::Error;
 use omicron_common::api::external::ListResultVec;
 use omicron_common::api::external::LookupResult;
+use omicron_uuid_kinds::{GenericUuid, SledUuid};
 use sled_agent_client::Client as SledAgentClient;
 use std::net::SocketAddrV6;
 use std::sync::Arc;
@@ -123,7 +124,7 @@ impl super::Nexus {

     pub async fn sled_client(
         &self,
-        id: &Uuid,
+        id: &SledUuid,
     ) -> Result<Arc<SledAgentClient>, Error> {
         // TODO: We should consider injecting connection pooling here,
         // but for now, connections to sled agents are constructed
@@ -135,7 +136,7 @@ impl super::Nexus {
         let client = nexus_networking::sled_client(
             &self.db_datastore,
             &self.opctx_alloc,
-            *id,
+            id.into_untyped_uuid(),
             &self.log,
         )
         .await?;
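With sled_client taking &SledUuid, a caller can no longer hand it an instance
or Propolis ID by mistake. The typed wrappers keep the usual Eq + Hash +
Display behavior, so derived data structures work unchanged; a small sketch:

    use omicron_uuid_kinds::SledUuid;
    use std::collections::HashMap;

    fn main() {
        // Keying a cache by SledUuid documents, and enforces, what the key is.
        let mut clients: HashMap<SledUuid, String> = HashMap::new();
        let sled = SledUuid::new_v4();
        clients.insert(sled, format!("client for sled {sled}"));
        assert!(clients.contains_key(&sled));
    }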
diff --git a/nexus/src/app/test_interfaces.rs b/nexus/src/app/test_interfaces.rs
index 9e7bd1582f..adfafa523d 100644
--- a/nexus/src/app/test_interfaces.rs
+++ b/nexus/src/app/test_interfaces.rs
@@ -6,6 +6,8 @@ use async_trait::async_trait;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db::lookup::LookupPath;
 use omicron_common::api::external::Error;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::{InstanceUuid, SledUuid};
 use sled_agent_client::Client as SledAgentClient;
 use std::sync::Arc;
 use uuid::Uuid;
@@ -28,12 +30,12 @@ pub trait TestInterfaces {
     /// but after all it's a test suite special to begin with.
     async fn instance_sled_by_id(
         &self,
-        id: &Uuid,
+        id: &InstanceUuid,
     ) -> Result<Option<Arc<SledAgentClient>>, Error>;

     async fn instance_sled_by_id_with_opctx(
         &self,
-        id: &Uuid,
+        id: &InstanceUuid,
         opctx: &OpContext,
     ) -> Result<Option<Arc<SledAgentClient>>, Error>;
@@ -47,14 +49,14 @@ pub trait TestInterfaces {
     /// Returns the supplied instance's current active sled ID.
     async fn instance_sled_id(
         &self,
-        instance_id: &Uuid,
-    ) -> Result<Option<Uuid>, Error>;
+        instance_id: &InstanceUuid,
+    ) -> Result<Option<SledUuid>, Error>;

     async fn instance_sled_id_with_opctx(
         &self,
-        instance_id: &Uuid,
+        instance_id: &InstanceUuid,
         opctx: &OpContext,
-    ) -> Result<Option<Uuid>, Error>;
+    ) -> Result<Option<SledUuid>, Error>;

     async fn set_disk_as_faulted(&self, disk_id: &Uuid) -> Result<bool, Error>;
@@ -69,7 +71,7 @@ impl TestInterfaces for super::Nexus {

     async fn instance_sled_by_id(
         &self,
-        id: &Uuid,
+        id: &InstanceUuid,
     ) -> Result<Option<Arc<SledAgentClient>>, Error> {
         let opctx = OpContext::for_tests(
             self.log.new(o!()),
@@ -82,7 +84,7 @@ impl TestInterfaces for super::Nexus {

     async fn instance_sled_by_id_with_opctx(
         &self,
-        id: &Uuid,
+        id: &InstanceUuid,
         opctx: &OpContext,
     ) -> Result<Option<Arc<SledAgentClient>>, Error> {
         let sled_id = self.instance_sled_id_with_opctx(id, opctx).await?;
@@ -107,11 +109,16 @@ impl TestInterfaces for super::Nexus {
         .fetch()
         .await?;

-        self.instance_sled_by_id(&db_disk.runtime().attach_instance_id.unwrap())
-            .await
+        let instance_id = InstanceUuid::from_untyped_uuid(
+            db_disk.runtime().attach_instance_id.unwrap(),
+        );
+        self.instance_sled_by_id(&instance_id).await
     }

-    async fn instance_sled_id(&self, id: &Uuid) -> Result<Option<Uuid>, Error> {
+    async fn instance_sled_id(
+        &self,
+        id: &InstanceUuid,
+    ) -> Result<Option<SledUuid>, Error> {
         let opctx = OpContext::for_tests(
             self.log.new(o!()),
             Arc::clone(&self.db_datastore)
@@ -123,11 +130,11 @@ impl TestInterfaces for super::Nexus {

     async fn instance_sled_id_with_opctx(
         &self,
-        id: &Uuid,
+        id: &InstanceUuid,
         opctx: &OpContext,
-    ) -> Result<Option<Uuid>, Error> {
+    ) -> Result<Option<SledUuid>, Error> {
         let (.., authz_instance) = LookupPath::new(&opctx, &self.db_datastore)
-            .instance_id(*id)
+            .instance_id(id.into_untyped_uuid())
             .lookup_for(nexus_db_queries::authz::Action::Read)
             .await?;
diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs
index ceafe7f103..582f7cb608 100644
--- a/nexus/src/internal_api/http_entrypoints.rs
+++ b/nexus/src/internal_api/http_entrypoints.rs
@@ -51,6 +51,8 @@ use omicron_common::api::internal::nexus::RepairStartInfo;
 use omicron_common::api::internal::nexus::SledInstanceState;
 use omicron_common::update::ArtifactId;
 use omicron_uuid_kinds::DownstairsKind;
+use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use omicron_uuid_kinds::SledUuid;
 use omicron_uuid_kinds::TypedUuid;
 use omicron_uuid_kinds::UpstairsKind;
@@ -274,7 +276,11 @@ async fn cpapi_instances_put(
     let opctx = crate::context::op_context_for_internal_api(&rqctx).await;
     let handler = async {
         nexus
-            .notify_instance_updated(&opctx, &path.instance_id, &new_state)
+            .notify_instance_updated(
+                &opctx,
+                &InstanceUuid::from_untyped_uuid(path.instance_id),
+                &new_state,
+            )
             .await?;
         Ok(HttpResponseUpdatedNoContent())
     };
diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs
index de00290616..97fd66f949 100644
--- a/nexus/test-utils/src/lib.rs
+++ b/nexus/test-utils/src/lib.rs
@@ -880,13 +880,14 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> {
             self.nexus_internal_addr.expect("Must launch Nexus first");

         // Set up a single sled agent.
-        let sa_id: Uuid = if switch_location == SwitchLocation::Switch0 {
+        let sa_id: SledUuid = if switch_location == SwitchLocation::Switch0 {
             SLED_AGENT_UUID
         } else {
             SLED_AGENT2_UUID
         }
         .parse()
         .unwrap();
+
         let tempdir = camino_tempfile::tempdir().unwrap();
         let sled_agent = start_sled_agent(
             self.logctx.log.new(o!(
@@ -1382,12 +1383,12 @@ async fn setup_with_config_impl(
 pub async fn start_sled_agent(
     log: Logger,
     nexus_address: SocketAddr,
-    id: Uuid,
+    id: SledUuid,
     update_directory: &Utf8Path,
     sim_mode: sim::SimMode,
 ) -> Result {
     let config = sim::Config::for_testing(
-        id,
+        id.into_untyped_uuid(),
         sim_mode,
         Some(nexus_address),
         Some(update_directory),
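The test harness parses its fixed sled-agent IDs straight into the typed form,
relying on SledUuid implementing FromStr just as uuid::Uuid does. A sketch
(the literal below is an arbitrary stand-in, not the real SLED_AGENT_UUID):

    use omicron_uuid_kinds::SledUuid;

    fn main() {
        let sa_id: SledUuid = "b6d65341-167c-41df-9b5c-41cded99c229"
            .parse()
            .expect("hard-coded test UUIDs should parse");
        println!("simulated sled agent: {sa_id}");
    }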
diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs
index a707f8240d..4c707ba0fa 100644
--- a/nexus/tests/integration_tests/disks.rs
+++ b/nexus/tests/integration_tests/disks.rs
@@ -43,7 +43,7 @@ use omicron_common::api::external::NameOrId;
 use omicron_nexus::app::{MAX_DISK_SIZE_BYTES, MIN_DISK_SIZE_BYTES};
 use omicron_nexus::Nexus;
 use omicron_nexus::TestInterfaces as _;
-use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
 use oximeter::types::Datum;
 use oximeter::types::Measurement;
 use sled_agent_client::TestInterfaces as _;
@@ -180,13 +180,13 @@ async fn set_instance_state(
     .unwrap()
 }

-async fn instance_simulate(nexus: &Arc<Nexus>, id: &Uuid) {
+async fn instance_simulate(nexus: &Arc<Nexus>, id: &InstanceUuid) {
     let sa = nexus
         .instance_sled_by_id(id)
         .await
         .unwrap()
         .expect("instance must be on a sled to simulate a state change");
-    sa.instance_finish_transition(*id).await;
+    sa.instance_finish_transition(id.into_untyped_uuid()).await;
 }

 #[nexus_test]
@@ -238,7 +238,11 @@ async fn test_disk_create_attach_detach_delete(
     // is an artificial limitation without hotplug support.
     let instance_next =
         set_instance_state(&client, INSTANCE_NAME, "stop").await;
-    instance_simulate(nexus, &instance_next.identity.id).await;
+    instance_simulate(
+        nexus,
+        &InstanceUuid::from_untyped_uuid(instance_next.identity.id),
+    )
+    .await;

     // Verify that there are no disks attached to the instance, and specifically
     // that our disk is not attached to this instance.
@@ -383,10 +387,9 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) {
     // to allow disks to be attached. There should be no disks attached
     // initially.
     let instance = create_instance(&client, PROJECT_NAME, INSTANCE_NAME).await;
-    let instance_id = &instance.identity.id;
-    let instance_next =
-        set_instance_state(&client, INSTANCE_NAME, "stop").await;
-    instance_simulate(nexus, &instance_next.identity.id).await;
+    let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id);
+    set_instance_state(&client, INSTANCE_NAME, "stop").await;
+    instance_simulate(nexus, &instance_id).await;
     let url_instance_disks =
         get_instance_disks_url(instance.identity.name.as_str());
     let listed_disks = disks_list(&client, &url_instance_disks).await;
@@ -422,7 +425,10 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) {

     assert_eq!(attached_disk.identity.name, disk.identity.name);
     assert_eq!(attached_disk.identity.id, disk.identity.id);
-    assert_eq!(attached_disk.state, DiskState::Attached(*instance_id));
+    assert_eq!(
+        attached_disk.state,
+        DiskState::Attached(instance_id.into_untyped_uuid())
+    );

     assert_eq!(
         get_disk_slot(cptestctx, attached_disk.identity.id).await,
@@ -485,13 +491,14 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) {

     // Create an instance to attach the disk.
     let instance = create_instance(&client, PROJECT_NAME, INSTANCE_NAME).await;
+    let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id);
+
     // TODO(https://github.com/oxidecomputer/omicron/issues/811):
     //
     // Instances must be stopped before disks can be attached - this
     // is an artificial limitation without hotplug support.
-    let instance_next =
-        set_instance_state(&client, INSTANCE_NAME, "stop").await;
-    instance_simulate(nexus, &instance_next.identity.id).await;
+    set_instance_state(&client, INSTANCE_NAME, "stop").await;
+    instance_simulate(nexus, &instance_id).await;

     // Verify that there are no disks attached to the instance, and specifically
     // that our disk is not attached to this instance.
@@ -526,8 +533,9 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) {
     // Create a second instance and try to attach the disk to that. This should
     // fail and the disk should remain attached to the first instance.
     let instance2 = create_instance(&client, PROJECT_NAME, "instance2").await;
-    let instance_next = set_instance_state(&client, "instance2", "stop").await;
-    instance_simulate(nexus, &instance_next.identity.id).await;
+    let instance2_id = InstanceUuid::from_untyped_uuid(instance2.identity.id);
+    set_instance_state(&client, "instance2", "stop").await;
+    instance_simulate(nexus, &instance2_id).await;

     let url_instance2_attach_disk =
         get_disk_attach_url(&instance2.identity.id.into());
@@ -556,7 +564,10 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) {
     );

     let attached_disk = disk_get(&client, &disk_url).await;
-    assert_eq!(attached_disk.state, DiskState::Attached(*instance_id));
+    assert_eq!(
+        attached_disk.state,
+        DiskState::Attached(instance_id.into_untyped_uuid())
+    );

     // Begin detaching the disk.
let disk = @@ -583,10 +594,12 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { disk.identity.name.clone(), ) .await; - let instance2_id = &instance2.identity.id; assert_eq!(attached_disk.identity.name, disk.identity.name); assert_eq!(attached_disk.identity.id, disk.identity.id); - assert_eq!(attached_disk.state, DiskState::Attached(*instance2_id)); + assert_eq!( + attached_disk.state, + DiskState::Attached(instance2_id.into_untyped_uuid()) + ); // At this point, it's not legal to attempt to attach it to a different // instance (the first one). @@ -618,7 +631,10 @@ async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { disk.identity.name.clone(), ) .await; - assert_eq!(disk.state, DiskState::Attached(*instance2_id)); + assert_eq!( + disk.state, + DiskState::Attached(instance2_id.into_untyped_uuid()) + ); // It's not allowed to delete a disk that's attached. let error = NexusRequest::expect_failure( diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 396edddc41..2789318855 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -49,6 +49,8 @@ use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -653,6 +655,7 @@ async fn test_floating_ip_create_attachment( &FIP_NAMES[..1], ) .await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Reacquire FIP: parent ID must have updated to match instance. let fetched_fip = @@ -673,8 +676,8 @@ async fn test_floating_ip_create_attachment( ); // Stop and delete the instance. - instance_simulate(nexus, &instance.identity.id).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; + instance_simulate(nexus, &instance_id).await; let _: Instance = NexusRequest::new( RequestBuilder::new( @@ -692,7 +695,7 @@ async fn test_floating_ip_create_attachment( .parsed_body() .unwrap(); - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; NexusRequest::object_delete( &client, @@ -762,10 +765,11 @@ async fn test_external_ip_live_attach_detach( &[], ) .await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); if *start { - instance_simulate(nexus, &instance.identity.id).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; + instance_simulate(nexus, &instance_id).await; } // Verify that each instance has no external IPs. 
@@ -1035,9 +1039,10 @@ async fn test_external_ip_attach_fail_if_in_use_by_other( &[FIP_NAMES[i]], ) .await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); - instance_simulate(nexus, &instance.identity.id).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; + instance_simulate(nexus, &instance_id).await; instances.push(instance); fips.push(fip); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 948f8a18f3..4f7a1d1b77 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -66,6 +66,9 @@ use omicron_nexus::Nexus; use omicron_nexus::TestInterfaces as _; use omicron_sled_agent::sim::SledAgent; use omicron_test_utils::dev::poll::wait_for_condition; +use omicron_uuid_kinds::PropolisUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use sled_agent_client::TestInterfaces as _; use std::convert::TryFrom; use std::net::Ipv4Addr; @@ -363,7 +366,8 @@ async fn test_instances_create_reboot_halt( ); // Now, simulate completion of instance boot and check the state reported. - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; identity_eq(&instance.identity, &instance_next.identity); assert_eq!(instance_next.runtime.run_state, InstanceState::Running); @@ -392,7 +396,7 @@ async fn test_instances_create_reboot_halt( ); let instance = instance_next; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Running); assert!( @@ -411,7 +415,7 @@ async fn test_instances_create_reboot_halt( ); let instance = instance_next; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Stopped); assert!( @@ -472,7 +476,7 @@ async fn test_instances_create_reboot_halt( ); let instance = instance_next; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Running); assert!( @@ -506,7 +510,7 @@ async fn test_instances_create_reboot_halt( .unwrap(); // assert_eq!(error.message, "cannot reboot instance in state \"stopping\""); let instance = instance_next; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Stopped); assert!( @@ -593,7 +597,7 @@ async fn test_instance_start_creates_networking_state( let nsleds = 3; let mut additional_sleds = Vec::with_capacity(nsleds); for _ in 0..nsleds { - let sa_id = Uuid::new_v4(); + let sa_id = SledUuid::new_v4(); let log = cptestctx.logctx.log.new(o!( "sled_id" => sa_id.to_string() )); let addr = cptestctx.server.get_http_server_internal_address().await; @@ -614,12 +618,12 @@ async fn test_instance_start_creates_networking_state( create_project_and_pool(&client).await; let 
instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Drive the instance to Running, then stop it. - instance_simulate(nexus, &instance.identity.id).await; - let instance = - instance_post(&client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; + instance_post(&client, instance_name, InstanceOp::Stop).await; + instance_simulate(nexus, &instance_id).await; let instance = instance_get(&client, &instance_url).await; assert_eq!(instance.runtime.run_state, InstanceState::Stopped); @@ -634,9 +638,8 @@ async fn test_instance_start_creates_networking_state( } // Start the instance and make sure that it gets to Running. - let instance = - instance_post(&client, instance_name, InstanceOp::Start).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(&client, instance_name, InstanceOp::Start).await; + instance_simulate(nexus, &instance_id).await; let instance = instance_get(&client, &instance_url).await; assert_eq!(instance.runtime.run_state, InstanceState::Running); @@ -709,10 +712,10 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { let instance_name = "bird-ecology"; // Create a second sled to migrate to/from. - let default_sled_id: Uuid = + let default_sled_id: SledUuid = nexus_test_utils::SLED_AGENT_UUID.parse().unwrap(); let update_dir = Utf8Path::new("/should/be/unused"); - let other_sled_id = Uuid::new_v4(); + let other_sled_id = SledUuid::new_v4(); let _other_sa = nexus_test_utils::start_sled_agent( cptestctx.logctx.log.new(o!("sled_id" => other_sled_id.to_string())), cptestctx.server.get_http_server_internal_address().await, @@ -738,7 +741,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { true, ) .await; - let instance_id = instance.identity.id; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Poke the instance into an active state. instance_simulate(nexus, &instance_id).await; @@ -761,7 +764,9 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { format!("/v1/instances/{}/migrate", &instance_id.to_string()); let instance = NexusRequest::new( RequestBuilder::new(client, Method::POST, &migrate_url) - .body(Some(¶ms::InstanceMigrate { dst_sled_id })) + .body(Some(¶ms::InstanceMigrate { + dst_sled_id: dst_sled_id.into_untyped_uuid(), + })) .expect_status(Some(StatusCode::OK)), ) .authn_as(AuthnMode::PrivilegedUser) @@ -844,7 +849,7 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { let nsleds = 3; let mut other_sleds = Vec::with_capacity(nsleds); for _ in 0..nsleds { - let sa_id = Uuid::new_v4(); + let sa_id = SledUuid::new_v4(); let log = cptestctx.logctx.log.new(o!("sled_id" => sa_id.to_string())); let update_dir = Utf8Path::new("/should/be/unused"); let sa = nexus_test_utils::start_sled_agent( @@ -875,7 +880,7 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { true, ) .await; - let instance_id = instance.identity.id; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // The default configuration gives one NIC. let nics_url = format!("/v1/network-interfaces?instance={}", instance_id); @@ -893,7 +898,7 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { // Ensure that all of the V2P information is correct. 
let (.., authz_instance) = LookupPath::new(&opctx, &datastore) - .instance_id(instance_id) + .instance_id(instance_id.into_untyped_uuid()) .lookup_for(nexus_db_queries::authz::Action::Read) .await .unwrap(); @@ -913,11 +918,12 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni).await; } - let dst_sled_id = if original_sled_id == cptestctx.sled_agent.sled_agent.id - { + let testctx_sled_id = + SledUuid::from_untyped_uuid(cptestctx.sled_agent.sled_agent.id); + let dst_sled_id = if original_sled_id == testctx_sled_id { other_sleds[0].0 } else { - cptestctx.sled_agent.sled_agent.id + testctx_sled_id }; // Kick off migration and simulate its completion on the target. @@ -925,7 +931,9 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { format!("/v1/instances/{}/migrate", &instance_id.to_string()); let _ = NexusRequest::new( RequestBuilder::new(client, Method::POST, &migrate_url) - .body(Some(¶ms::InstanceMigrate { dst_sled_id })) + .body(Some(¶ms::InstanceMigrate { + dst_sled_id: dst_sled_id.into_untyped_uuid(), + })) .expect_status(Some(StatusCode::OK)), ) .authn_as(AuthnMode::PrivilegedUser) @@ -954,7 +962,7 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { // updated here because Nexus presumes that the instance's new sled // agent will have updated any mappings there. Remove this bifurcation // when Nexus programs all mappings explicitly. - if sled_agent.id != dst_sled_id { + if sled_agent.id != dst_sled_id.into_untyped_uuid() { assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) .await; } @@ -976,7 +984,8 @@ async fn test_instance_failed_after_sled_agent_error( create_project_and_pool(&client).await; let instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Running); @@ -1013,7 +1022,8 @@ async fn test_instance_failed_after_sled_agent_error( sled_agent.set_instance_ensure_state_error(None).await; let instance = create_instance(client, PROJECT_NAME, instance_name).await; - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Running); @@ -1135,7 +1145,8 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { // deprovisioned. let instance = instance_post(&client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; let instance = instance_get(&client, &get_instance_url(&instance_name)).await; assert_eq!(instance.runtime.run_state, InstanceState::Stopped); @@ -1187,10 +1198,10 @@ async fn test_instance_metrics_with_migration( .await; // Create a second sled to migrate to/from. 
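// Aside: external-API types such as `params::InstanceMigrate` still carry a
// bare `Uuid`, so typed sled IDs are unwrapped exactly once, where the request
// body is built. Continuing the sketch above, with a stand-in params struct
// (the field mirrors the diff; the helper name is made up for illustration):
pub struct InstanceMigrateParams {
    pub dst_sled_id: Uuid, // still untyped at the external boundary
}

pub fn migrate_request_body(dst_sled_id: SledUuid) -> InstanceMigrateParams {
    // The single `into_untyped_uuid()` sits at the edge; every caller
    // upstream of it keeps the compiler-checked `SledUuid`.
    InstanceMigrateParams { dst_sled_id: dst_sled_id.into_untyped_uuid() }
}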
- let default_sled_id: Uuid = + let default_sled_id: SledUuid = nexus_test_utils::SLED_AGENT_UUID.parse().unwrap(); let update_dir = Utf8Path::new("/should/be/unused"); - let other_sled_id = Uuid::new_v4(); + let other_sled_id = SledUuid::new_v4(); let _other_sa = nexus_test_utils::start_sled_agent( cptestctx.logctx.log.new(o!("sled_id" => other_sled_id.to_string())), cptestctx.server.get_http_server_internal_address().await, @@ -1217,7 +1228,7 @@ async fn test_instance_metrics_with_migration( true, ) .await; - let instance_id = instance.identity.id; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Poke the instance into an active state. instance_simulate(nexus, &instance_id).await; @@ -1266,7 +1277,9 @@ async fn test_instance_metrics_with_migration( format!("/v1/instances/{}/migrate", &instance_id.to_string()); let _ = NexusRequest::new( RequestBuilder::new(client, Method::POST, &migrate_url) - .body(Some(¶ms::InstanceMigrate { dst_sled_id })) + .body(Some(¶ms::InstanceMigrate { + dst_sled_id: dst_sled_id.into_untyped_uuid(), + })) .expect_status(Some(StatusCode::OK)), ) .authn_as(AuthnMode::PrivilegedUser) @@ -1293,9 +1306,8 @@ async fn test_instance_metrics_with_migration( // instance (whose demise wasn't simulated here), but this is intentionally // not reflected in the virtual provisioning counters (which reflect the // logical states of instances ignoring migration). - let instance = - instance_post(&client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(&client, instance_name, InstanceOp::Stop).await; + instance_simulate(nexus, &instance_id).await; let instance = instance_get(&client, &get_instance_url(&instance_name)).await; assert_eq!(instance.runtime.run_state, InstanceState::Stopped); @@ -1342,9 +1354,10 @@ async fn test_instances_create_stopped_start( let instance_url = get_instance_url(instance_name); let instance = instance_post(&client, instance_name, InstanceOp::Start).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Now, simulate completion of instance boot and check the state reported. - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; identity_eq(&instance.identity, &instance_next.identity); assert_eq!(instance_next.runtime.run_state, InstanceState::Running); @@ -1368,9 +1381,10 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( // Create an instance. let instance_url = get_instance_url(instance_name); let instance = create_instance(client, PROJECT_NAME, instance_name).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // Simulate the instance booting. 
- instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; identity_eq(&instance.identity, &instance_next.identity); assert_eq!(instance_next.runtime.run_state, InstanceState::Running); @@ -1394,9 +1408,8 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( ); // Stop the instance - let instance = - instance_post(&client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(&client, instance_name, InstanceOp::Stop).await; + instance_simulate(nexus, &instance_id).await; let instance = instance_get(&client, &instance_url).await; assert_eq!(instance.runtime.run_state, InstanceState::Stopped); @@ -1995,7 +2008,8 @@ async fn test_instance_create_delete_network_interface( // Stop the instance let instance = instance_post(client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; // Verify we can now make the requests again let mut interfaces = Vec::with_capacity(2); @@ -2021,9 +2035,8 @@ async fn test_instance_create_delete_network_interface( } // Restart the instance, verify the interfaces are still correct. - let instance = - instance_post(client, instance_name, InstanceOp::Start).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(client, instance_name, InstanceOp::Start).await; + instance_simulate(nexus, &instance_id).await; // Get all interfaces in one request. let other_interfaces = objects_list_page_authz::( @@ -2064,8 +2077,8 @@ async fn test_instance_create_delete_network_interface( } // Stop the instance and verify we can delete the interface - let instance = instance_post(client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(client, instance_name, InstanceOp::Stop).await; + instance_simulate(nexus, &instance_id).await; // We should not be able to delete the primary interface, while the // secondary still exists @@ -2201,7 +2214,8 @@ async fn test_instance_update_network_interfaces( // Stop the instance let instance = instance_post(client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; // Create the first interface on the instance. let primary_iface = NexusRequest::objects_post( @@ -2221,9 +2235,8 @@ async fn test_instance_update_network_interfaces( // Restart the instance, to ensure we can only modify things when it's // stopped. - let instance = - instance_post(client, instance_name, InstanceOp::Start).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(client, instance_name, InstanceOp::Start).await; + instance_simulate(nexus, &instance_id).await; // We'll change the interface's name and description let new_name = Name::try_from(String::from("new-if0")).unwrap(); @@ -2260,8 +2273,8 @@ async fn test_instance_update_network_interfaces( ); // Stop the instance again, and now verify that the update works. 
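// Aside: the payoff for threading `InstanceUuid` through the helpers above is
// that kind confusion becomes a compile error instead of a latent runtime
// bug. Continuing the sketch:
fn kind_safety_demo() {
    fn simulate(_id: &InstanceUuid) {}

    let instance_id = InstanceUuid::new_v4();
    let propolis_id = PropolisUuid::new_v4();

    simulate(&instance_id); // compiles
    // simulate(&propolis_id); // error[E0308]: expected `&TypedUuid<InstanceKind>`
    let _ = propolis_id;
}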
- let instance = instance_post(client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(client, instance_name, InstanceOp::Stop).await; + instance_simulate(nexus, &instance_id).await; let updated_primary_iface = NexusRequest::object_put( client, &format!("/v1/network-interfaces/{}", primary_iface.identity.id), @@ -2363,9 +2376,8 @@ async fn test_instance_update_network_interfaces( ); // Restart the instance, and verify that we can't update either interface. - let instance = - instance_post(client, instance_name, InstanceOp::Start).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(client, instance_name, InstanceOp::Start).await; + instance_simulate(nexus, &instance_id).await; for if_id in [&updated_primary_iface.identity.id, &secondary_iface.identity.id] @@ -2393,8 +2405,8 @@ async fn test_instance_update_network_interfaces( } // Stop the instance again. - let instance = instance_post(client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_post(client, instance_name, InstanceOp::Stop).await; + instance_simulate(nexus, &instance_id).await; // Verify that we can set the secondary as the new primary, and that nothing // else changes about the NICs. @@ -3161,19 +3173,19 @@ async fn test_disks_detached_when_instance_destroyed( // sled. let instance_url = format!("/v1/instances/nfs?project={}", PROJECT_NAME); let instance = instance_get(&client, &instance_url).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sa = nexus - .instance_sled_by_id(&instance.identity.id) + .instance_sled_by_id(&instance_id) .await .unwrap() .expect("instance should be on a sled while it's running"); // Stop and delete instance - let instance = - instance_post(&client, instance_name, InstanceOp::Stop).await; + instance_post(&client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance = instance_get(&client, &instance_url).await; assert_eq!(instance.runtime.run_state, InstanceState::Stopped); @@ -3689,9 +3701,10 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. let nexus = &cptestctx.server.server_context().nexus; - instance_simulate(nexus, &instances[1].identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instances[1].identity.id); + instance_simulate(nexus, &instance_id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; - instance_simulate(nexus, &instances[1].identity.id).await; + instance_simulate(nexus, &instance_id).await; expect_instance_start_ok(client, configs[2].0).await; } @@ -3795,9 +3808,10 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. 
let nexus = &cptestctx.server.server_context().nexus; - instance_simulate(nexus, &instances[1].identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instances[1].identity.id); + instance_simulate(nexus, &instance_id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; - instance_simulate(nexus, &instances[1].identity.id).await; + instance_simulate(nexus, &instance_id).await; expect_instance_start_ok(client, configs[2].0).await; } @@ -3840,9 +3854,10 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { // Create an instance and poke it to ensure it's running. let instance = create_instance(client, PROJECT_NAME, instance_name).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); let instance_next = poll::wait_for_condition( || async { - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; if instance_next.runtime.run_state == InstanceState::Running { Ok(instance_next) @@ -3873,10 +3888,12 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { .fetch() .await .unwrap(); - let propolis_id = db_instance - .runtime() - .propolis_id - .expect("running instance should have vmm"); + let propolis_id = PropolisUuid::from_untyped_uuid( + db_instance + .runtime() + .propolis_id + .expect("running instance should have vmm"), + ); let updated_vmm = datastore .vmm_overwrite_addr_for_test(&opctx, &propolis_id, propolis_addr) .await @@ -3916,7 +3933,7 @@ async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { ); let instance = instance_next; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_next = instance_get(&client, &instance_url).await; assert_eq!(instance_next.runtime.run_state, InstanceState::Stopped); assert!( @@ -4085,7 +4102,11 @@ async fn stop_and_delete_instance( let instance = instance_post(&client, instance_name, InstanceOp::Stop).await; let nexus = &cptestctx.server.server_context().nexus; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate( + nexus, + &InstanceUuid::from_untyped_uuid(instance.identity.id), + ) + .await; let url = format!("/v1/instances/{}?project={}", instance_name, PROJECT_NAME); object_delete(client, &url).await; @@ -4473,6 +4494,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { let authn = AuthnMode::SiloUser(user_id); let instance_url = get_instance_url(instance_name); let instance = instance_get_as(&client, &instance_url, authn.clone()).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); info!(&cptestctx.logctx.log, "test got instance"; "instance" => ?instance); assert_eq!(instance.runtime.run_state, InstanceState::Starting); @@ -4490,7 +4512,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { ), nexus.datastore().clone(), ); - instance_simulate_with_opctx(nexus, &instance.identity.id, &opctx).await; + instance_simulate_with_opctx(nexus, &instance_id, &opctx).await; let instance = instance_get_as(&client, &instance_url, authn).await; assert_eq!(instance.runtime.run_state, InstanceState::Running); @@ -4509,7 +4531,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { .await .expect("Failed to stop the instance"); - instance_simulate_with_opctx(nexus, &instance.identity.id, &opctx).await; + instance_simulate_with_opctx(nexus, 
&instance_id, &opctx).await; // Delete the instance NexusRequest::object_delete(client, &instance_url) @@ -4530,7 +4552,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { let nsleds = 3; let mut additional_sleds = Vec::with_capacity(nsleds); for _ in 0..nsleds { - let sa_id = Uuid::new_v4(); + let sa_id = SledUuid::new_v4(); let log = cptestctx.logctx.log.new(o!( "sled_id" => sa_id.to_string() )); let addr = cptestctx.server.get_http_server_internal_address().await; @@ -4551,6 +4573,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // Create an instance. let instance_name = "test-instance"; let instance = create_instance(client, PROJECT_NAME, instance_name).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); let nics_url = format!("/v1/network-interfaces?instance={}", instance.identity.id,); @@ -4593,9 +4616,9 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { } // Delete the instance - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; instance_post(&client, instance_name, InstanceOp::Stop).await; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; let instance_url = get_instance_url(instance_name); NexusRequest::object_delete(client, &instance_url) @@ -4745,18 +4768,18 @@ async fn assert_sled_v2p_mappings( /// have to look up the instance, then get the sled agent associated with that /// instance, and then tell it to finish simulating whatever async transition is /// going on. -pub async fn instance_simulate(nexus: &Arc, id: &Uuid) { +pub async fn instance_simulate(nexus: &Arc, id: &InstanceUuid) { let sa = nexus .instance_sled_by_id(id) .await .unwrap() .expect("instance must be on a sled to simulate a state change"); - sa.instance_finish_transition(*id).await; + sa.instance_finish_transition(id.into_untyped_uuid()).await; } pub async fn instance_simulate_with_opctx( nexus: &Arc, - id: &Uuid, + id: &InstanceUuid, opctx: &OpContext, ) { let sa = nexus @@ -4764,7 +4787,7 @@ pub async fn instance_simulate_with_opctx( .await .unwrap() .expect("instance must be on a sled to simulate a state change"); - sa.instance_finish_transition(*id).await; + sa.instance_finish_transition(id.into_untyped_uuid()).await; } /// Simulates state transitions for the incarnation of the instance on the @@ -4773,11 +4796,11 @@ pub async fn instance_simulate_with_opctx( async fn instance_simulate_on_sled( cptestctx: &ControlPlaneTestContext, nexus: &Arc, - sled_id: Uuid, - instance_id: Uuid, + sled_id: SledUuid, + instance_id: InstanceUuid, ) { info!(&cptestctx.logctx.log, "Poking simulated instance on sled"; "instance_id" => %instance_id, "sled_id" => %sled_id); let sa = nexus.sled_client(&sled_id).await.unwrap(); - sa.instance_finish_transition(instance_id).await; + sa.instance_finish_transition(instance_id.into_untyped_uuid()).await; } diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index e3ddc98029..df3aa93c5f 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -54,6 +54,8 @@ use omicron_common::api::external::NameOrId; use omicron_common::api::external::SimpleIdentity; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_nexus::TestInterfaces; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; use sled_agent_client::TestInterfaces as 
SledTestInterfaces; use uuid::Uuid; @@ -1254,6 +1256,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( const INSTANCE_NAME: &str = "myinst"; create_project(client, PROJECT_NAME).await; let instance = create_instance(client, PROJECT_NAME, INSTANCE_NAME).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // We should not be able to delete the range, since there's an external IP // address in use out of it. @@ -1293,7 +1296,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( // Simulate the transition, wait until it is in fact stopped. let sa = nexus - .instance_sled_by_id(&instance.identity.id) + .instance_sled_by_id(&instance_id) .await .unwrap() .expect("running instance should be on a sled"); diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 5fbff216d9..9cfa0350e8 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -20,6 +20,7 @@ use nexus_test_utils::resource_helpers::{ use nexus_test_utils::ControlPlaneTestContext; use nexus_test_utils_macros::nexus_test; use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; +use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use oximeter::types::Datum; use oximeter::types::Measurement; use oximeter::TimeseriesSchema; @@ -429,11 +430,11 @@ async fn test_instance_watcher_metrics( #[track_caller] fn count_state( table: &oximeter_db::oxql::Table, - instance_id: Uuid, + instance_id: InstanceUuid, state: &'static str, ) -> i64 { use oximeter_db::oxql::point::ValueArray; - let uuid = FieldValue::Uuid(instance_id); + let uuid = FieldValue::Uuid(instance_id.into_untyped_uuid()); let state = FieldValue::String(state.into()); let mut timeserieses = table.timeseries().filter(|ts| { ts.fields.get(INSTANCE_ID_FIELD) == Some(&uuid) @@ -473,7 +474,7 @@ async fn test_instance_watcher_metrics( eprintln!("--- creating instance 1 ---"); let instance1 = create_instance(&client, project_name, "i-1").await; - let instance1_uuid = instance1.identity.id; + let instance1_uuid = InstanceUuid::from_untyped_uuid(instance1.identity.id); // activate the instance watcher background task. activate_instance_watcher().await; @@ -489,7 +490,7 @@ async fn test_instance_watcher_metrics( // okay, make another instance eprintln!("--- creating instance 2 ---"); let instance2 = create_instance(&client, project_name, "i-2").await; - let instance2_uuid = instance2.identity.id; + let instance2_uuid = InstanceUuid::from_untyped_uuid(instance2.identity.id); // activate the instance watcher background task. 
activate_instance_watcher().await; diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index c5d98709ac..29e590b1a9 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -26,6 +26,8 @@ use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_nexus::Nexus; use omicron_nexus::TestInterfaces as _; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; use sled_agent_client::TestInterfaces as _; use std::sync::Arc; use uuid::Uuid; @@ -83,13 +85,13 @@ async fn set_instance_state( .unwrap() } -async fn instance_simulate(nexus: &Arc, id: &Uuid) { +async fn instance_simulate(nexus: &Arc, id: &InstanceUuid) { let sa = nexus .instance_sled_by_id(id) .await .unwrap() .expect("instance must be on a sled to simulate a state change"); - sa.instance_finish_transition(*id).await; + sa.instance_finish_transition(id.into_untyped_uuid()).await; } async fn disk_get(client: &ClientTestContext, disk_url: &str) -> Disk { @@ -147,14 +149,14 @@ async fn create_instance_and_attach_disk( ) { // Create an instance to attach the disk. let instance = create_instance(&client, PROJECT_NAME, INSTANCE_NAME).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // TODO(https://github.com/oxidecomputer/omicron/issues/811): // // Instances must be stopped before disks can be attached - this // is an artificial limitation without hotplug support. - let instance_next = - set_instance_state(&client, INSTANCE_NAME, "stop").await; - instance_simulate(nexus, &instance_next.identity.id).await; + set_instance_state(&client, INSTANCE_NAME, "stop").await; + instance_simulate(nexus, &instance_id).await; let url_instance_attach_disk = get_disk_attach_url(instance.identity.name.as_str()); diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index 97dbb39bc6..bd8a8877fc 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -20,6 +20,7 @@ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::views::SledInstance; use nexus_types::external_api::views::{PhysicalDisk, Sled}; use omicron_sled_agent::sim; +use omicron_uuid_kinds::SledUuid; use std::str::FromStr; use uuid::Uuid; @@ -56,7 +57,7 @@ async fn test_sleds_list(cptestctx: &ControlPlaneTestContext) { let nsleds = 3; let mut sas = Vec::with_capacity(nsleds); for _ in 0..nsleds { - let sa_id = Uuid::new_v4(); + let sa_id = SledUuid::new_v4(); let log = cptestctx.logctx.log.new(o!( "sled_id" => sa_id.to_string() )); let addr = cptestctx.server.get_http_server_internal_address().await; diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index f0ce2cca72..b4b8ad3090 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -38,6 +38,8 @@ use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::Name; use omicron_nexus::app::MIN_DISK_SIZE_BYTES; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -139,10 +141,11 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { }, ) .await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); // cannot snapshot attached disk for instance in state starting let nexus = 
&cptestctx.server.server_context().nexus; - instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance_id).await; // Issue snapshot request let snapshots_url = format!("/v1/snapshots?project={}", PROJECT_NAME); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index dcc96d08bf..81e7156e8e 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -21,6 +21,8 @@ use nexus_types::external_api::{params, views::VpcSubnet}; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::Ipv6NetExt; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; use oxnet::Ipv6Net; type ControlPlaneTestContext = @@ -55,7 +57,8 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( let instance_url = format!("/v1/instances/{instance_name}?project={project_name}"); let instance = create_instance(client, &project_name, instance_name).await; - instance_simulate(nexus, &instance.identity.id).await; + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + instance_simulate(nexus, &instance_id).await; let err: HttpErrorResponseBody = NexusRequest::expect_failure( &client, StatusCode::BAD_REQUEST, @@ -76,7 +79,7 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( // Stop and then delete the instance instance_post(client, instance_name, InstanceOp::Stop).await; - instance_simulate(&nexus, &instance.identity.id).await; + instance_simulate(&nexus, &instance_id).await; NexusRequest::object_delete(&client, &instance_url) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index b27dc6ce00..c3cc3c059d 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -3145,8 +3145,11 @@ "dst_propolis_id": { "nullable": true, "description": "If a migration is active, the ID of the target VMM.", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "gen": { "description": "Generation number for this state.", @@ -3165,8 +3168,11 @@ "propolis_id": { "nullable": true, "description": "The instance's currently active VMM ID.", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "time_updated": { "description": "Timestamp for this information.", @@ -4665,8 +4671,11 @@ }, "propolis_id": { "description": "The ID of the VMM whose state is being reported.", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "vmm_state": { "description": "The most recent state of the sled's VMM process.", @@ -4905,6 +4914,10 @@ "type": "string", "format": "uuid" }, + "TypedUuidForPropolisKind": { + "type": "string", + "format": "uuid" + }, "TypedUuidForSledKind": { "type": "string", "format": "uuid" diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index 25e8d1c5da..e1349c2b59 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -230,8 +230,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -272,8 +271,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": 
"#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -360,8 +358,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -395,8 +392,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -432,8 +428,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -476,8 +471,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -508,8 +502,7 @@ "name": "instance_id", "required": true, "schema": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForInstanceKind" } } ], @@ -2812,8 +2805,11 @@ }, "propolis_id": { "description": "The ID of the VMM being registered. This may not be the active VMM ID in the instance runtime state (e.g. if the new VMM is going to be a migration target).", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "vmm_runtime": { "description": "The initial VMM runtime state for the VMM being registered.", @@ -2982,8 +2978,7 @@ "type": "object", "properties": { "dst_propolis_id": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForPropolisKind" }, "migration_id": { "type": "string", @@ -3104,8 +3099,11 @@ "dst_propolis_id": { "nullable": true, "description": "If a migration is active, the ID of the target VMM.", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "gen": { "description": "Generation number for this state.", @@ -3124,8 +3122,11 @@ "propolis_id": { "nullable": true, "description": "The instance's currently active VMM ID.", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "time_updated": { "description": "Timestamp for this information.", @@ -4280,8 +4281,11 @@ }, "propolis_id": { "description": "The ID of the VMM whose state is being reported.", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForPropolisKind" + } + ] }, "vmm_state": { "description": "The most recent state of the sled's VMM process.", @@ -4499,6 +4503,10 @@ "sync" ] }, + "TypedUuidForPropolisKind": { + "type": "string", + "format": "uuid" + }, "TypedUuidForZpoolKind": { "type": "string", "format": "uuid" @@ -5044,6 +5052,10 @@ "A", "B" ] + }, + "TypedUuidForInstanceKind": { + "type": "string", + "format": "uuid" } }, "responses": { diff --git a/sled-agent/src/common/instance.rs b/sled-agent/src/common/instance.rs index b2c135fcf8..02623455f4 100644 --- a/sled-agent/src/common/instance.rs +++ b/sled-agent/src/common/instance.rs @@ -11,18 +11,18 @@ use omicron_common::api::internal::nexus::{ InstanceRuntimeState, MigrationRole, MigrationRuntimeState, MigrationState, SledInstanceState, VmmRuntimeState, VmmState, }; +use omicron_uuid_kinds::PropolisUuid; use propolis_client::types::{ InstanceState as PropolisApiState, InstanceStateMonitorResponse, MigrationState as PropolisMigrationState, }; -use uuid::Uuid; /// The instance and VMM state that sled agent maintains on a per-VMM basis. 
#[derive(Clone, Debug)] pub struct InstanceStates { instance: InstanceRuntimeState, vmm: VmmRuntimeState, - propolis_id: Uuid, + propolis_id: PropolisUuid, migration: Option, } @@ -209,7 +209,7 @@ impl InstanceStates { pub fn new( instance: InstanceRuntimeState, vmm: VmmRuntimeState, - propolis_id: Uuid, + propolis_id: PropolisUuid, ) -> Self { let migration = instance.migration_id.map(|migration_id| { let dst_propolis_id = instance.dst_propolis_id.expect("if an instance has a migration ID, it should also have a target VMM ID"); @@ -237,7 +237,7 @@ impl InstanceStates { &self.vmm } - pub fn propolis_id(&self) -> Uuid { + pub fn propolis_id(&self) -> PropolisUuid { self.propolis_id } @@ -602,7 +602,7 @@ mod test { use uuid::Uuid; fn make_instance() -> InstanceStates { - let propolis_id = Uuid::new_v4(); + let propolis_id = PropolisUuid::new_v4(); let now = Utc::now(); let instance = InstanceRuntimeState { propolis_id: Some(propolis_id), @@ -626,7 +626,7 @@ mod test { state.vmm.state = VmmState::Migrating; let migration_id = Uuid::new_v4(); state.instance.migration_id = Some(migration_id); - state.instance.dst_propolis_id = Some(Uuid::new_v4()); + state.instance.dst_propolis_id = Some(PropolisUuid::new_v4()); state.migration = Some(MigrationRuntimeState { migration_id, state: MigrationState::InProgress, @@ -636,6 +636,7 @@ mod test { gen: Generation::new().next(), time_updated: Utc::now(), }); + state } @@ -644,7 +645,7 @@ mod test { state.vmm.state = VmmState::Migrating; let migration_id = Uuid::new_v4(); state.instance.migration_id = Some(migration_id); - state.propolis_id = Uuid::new_v4(); + state.propolis_id = PropolisUuid::new_v4(); state.instance.dst_propolis_id = Some(state.propolis_id); state.migration = Some(MigrationRuntimeState { migration_id, @@ -935,7 +936,7 @@ mod test { state.set_migration_ids( &Some(InstanceMigrationSourceParams { migration_id, - dst_propolis_id: Uuid::new_v4(), + dst_propolis_id: PropolisUuid::new_v4(), }), Utc::now(), ); @@ -1020,7 +1021,7 @@ mod test { // new IDs are present should indicate that they are indeed present. 
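// Aside: the OpenAPI hunks above show why none of this changes the wire
// format: every `TypedUuidFor*Kind` schema is still a plain string with
// `"format": "uuid"`. In the sketch's terms, serialization simply delegates
// to the inner `Uuid` (assumes the uuid crate's serde feature; the real crate
// derives this, and `Deserialize` mirrors it in the other direction):
impl<K> serde::Serialize for TypedUuid<K> {
    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        self.inner.serialize(s)
    }
}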
let migration_ids = InstanceMigrationSourceParams { migration_id: Uuid::new_v4(), - dst_propolis_id: Uuid::new_v4(), + dst_propolis_id: PropolisUuid::new_v4(), }; new_instance.set_migration_ids(&Some(migration_ids), Utc::now()); @@ -1056,7 +1057,7 @@ mod test { )); new_instance.instance.migration_id = Some(migration_ids.migration_id); - new_instance.instance.dst_propolis_id = Some(Uuid::new_v4()); + new_instance.instance.dst_propolis_id = Some(PropolisUuid::new_v4()); assert!(!new_instance.migration_ids_already_set( old_instance.instance(), &Some(migration_ids) diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index c5cd88619f..6defd18a95 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -32,6 +32,7 @@ use omicron_common::api::internal::nexus::{ DiskRuntimeState, SledInstanceState, UpdateArtifactId, }; use omicron_common::api::internal::shared::SwitchPorts; +use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_hardware::DiskVariant; @@ -414,7 +415,7 @@ async fn cockroachdb_init( /// Path parameters for Instance requests (sled agent API) #[derive(Deserialize, JsonSchema)] struct InstancePathParam { - instance_id: Uuid, + instance_id: InstanceUuid, } #[endpoint { @@ -614,7 +615,7 @@ async fn instance_issue_disk_snapshot_request( let body = body.into_inner(); sa.instance_issue_disk_snapshot_request( - path_params.instance_id, + InstanceUuid::from_untyped_uuid(path_params.instance_id), path_params.disk_id, body.snapshot_id, ) diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index c6c567595d..04b68ef752 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -39,6 +39,7 @@ use omicron_common::api::internal::shared::{ NetworkInterface, SourceNatConfig, }; use omicron_common::backoff; +use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid}; use propolis_client::Client as PropolisClient; use rand::prelude::SliceRandom; use rand::SeedableRng; @@ -108,10 +109,10 @@ pub enum Error { ResolveError(#[from] internal_dns::resolver::ResolveError), #[error("Instance {0} not running!")] - InstanceNotRunning(Uuid), + InstanceNotRunning(InstanceUuid), #[error("Instance already registered with Propolis ID {0}")] - InstanceAlreadyRegistered(Uuid), + InstanceAlreadyRegistered(PropolisUuid), #[error("No U.2 devices found")] U2NotFound, @@ -182,7 +183,7 @@ fn fmri_name() -> String { /// Return the expected name of a Propolis zone managing an instance with the /// provided ID. -pub fn propolis_zone_name(id: &Uuid) -> String { +pub fn propolis_zone_name(id: &PropolisUuid) -> String { format!("{}{}", PROPOLIS_ZONE_PREFIX, id) } @@ -316,7 +317,7 @@ struct InstanceRunner { properties: propolis_client::types::InstanceProperties, // The ID of the Propolis server (and zone) running this instance - propolis_id: Uuid, + propolis_id: PropolisUuid, // The socket address of the Propolis server running this instance propolis_addr: SocketAddr, @@ -470,12 +471,12 @@ impl InstanceRunner { } /// Yields this instance's ID. - fn id(&self) -> &Uuid { - &self.properties.id + fn id(&self) -> InstanceUuid { + InstanceUuid::from_untyped_uuid(self.properties.id) } /// Yields this instance's Propolis's ID. 
- fn propolis_id(&self) -> &Uuid { + fn propolis_id(&self) -> &PropolisUuid { &self.propolis_id } @@ -495,7 +496,10 @@ impl InstanceRunner { self.nexus_client .client() - .cpapi_instances_put(self.id(), &state.into()) + .cpapi_instances_put( + &self.id().into_untyped_uuid(), + &state.into(), + ) .await .map_err(|err| -> backoff::BackoffError { match &err { @@ -928,8 +932,8 @@ impl Instance { /// * `metadata`: Instance-related metadata used to track statistics. pub(crate) fn new( log: Logger, - id: Uuid, - propolis_id: Uuid, + id: InstanceUuid, + propolis_id: PropolisUuid, ticket: InstanceTicket, state: InstanceInitialState, services: InstanceManagerServices, @@ -1000,7 +1004,7 @@ impl Instance { monitor_handle: None, // NOTE: Mostly lies. properties: propolis_client::types::InstanceProperties { - id, + id: id.into_untyped_uuid(), name: hardware.properties.hostname.to_string(), description: "Test description".to_string(), image_id: Uuid::nil(), @@ -1262,7 +1266,7 @@ impl InstanceRunner { } InstanceStateRequested::Reboot => { if self.running_state.is_none() { - return Err(Error::InstanceNotRunning(*self.id())); + return Err(Error::InstanceNotRunning(self.id())); } ( Some(PropolisRequest::Reboot), @@ -1354,7 +1358,7 @@ impl InstanceRunner { .with_zone_root_path(&root) .with_zone_image_paths(&["/opt/oxide".into()]) .with_zone_type("propolis-server") - .with_unique_name(*self.propolis_id()) + .with_unique_name(self.propolis_id().into_untyped_uuid()) .with_datasets(&[]) .with_filesystems(&[]) .with_data_links(&[]) @@ -1473,7 +1477,9 @@ impl InstanceRunner { Ok(()) } else { - Err(Error::InstanceNotRunning(self.properties.id)) + Err(Error::InstanceNotRunning(InstanceUuid::from_untyped_uuid( + self.properties.id, + ))) } } @@ -1710,8 +1716,8 @@ mod tests { storage_handle: StorageHandle, temp_dir: &String, ) -> Instance { - let id = Uuid::new_v4(); - let propolis_id = Uuid::new_v4(); + let id = InstanceUuid::new_v4(); + let propolis_id = PropolisUuid::new_v4(); let ticket = InstanceTicket::new_without_manager_for_test(id); @@ -1743,7 +1749,7 @@ mod tests { } fn fake_instance_initial_state( - propolis_id: Uuid, + propolis_id: PropolisUuid, propolis_addr: SocketAddr, ) -> InstanceInitialState { let hardware = InstanceHardware { @@ -2104,8 +2110,8 @@ mod tests { propolis_mock_server(&logctx.log); let propolis_addr = propolis_server.local_addr(); - let instance_id = Uuid::new_v4(); - let propolis_id = Uuid::new_v4(); + let instance_id = InstanceUuid::new_v4(); + let propolis_id = PropolisUuid::new_v4(); let InstanceInitialState { hardware, instance_runtime, diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index ee1425f0d7..beeb8377d2 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -27,6 +27,8 @@ use illumos_utils::running_zone::ZoneBuilderFactory; use omicron_common::api::internal::nexus::InstanceRuntimeState; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::api::internal::nexus::VmmRuntimeState; +use omicron_uuid_kinds::InstanceUuid; +use omicron_uuid_kinds::PropolisUuid; use sled_storage::manager::StorageHandle; use slog::Logger; use std::collections::BTreeMap; @@ -44,7 +46,7 @@ pub enum Error { Instance(#[from] crate::instance::Error), #[error("No such instance ID: {0}")] - NoSuchInstance(Uuid), + NoSuchInstance(InstanceUuid), #[error("OPTE port management error: {0}")] Opte(#[from] illumos_utils::opte::Error), @@ -137,8 +139,8 @@ impl InstanceManager { #[allow(clippy::too_many_arguments)] 
pub async fn ensure_registered( &self, - instance_id: Uuid, - propolis_id: Uuid, + instance_id: InstanceUuid, + propolis_id: PropolisUuid, hardware: InstanceHardware, instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, @@ -165,7 +167,7 @@ impl InstanceManager { pub async fn ensure_unregistered( &self, - instance_id: Uuid, + instance_id: InstanceUuid, ) -> Result { let (tx, rx) = oneshot::channel(); self.inner @@ -181,7 +183,7 @@ impl InstanceManager { pub async fn ensure_state( &self, - instance_id: Uuid, + instance_id: InstanceUuid, target: InstanceStateRequested, ) -> Result { let (tx, rx) = oneshot::channel(); @@ -215,7 +217,7 @@ impl InstanceManager { pub async fn put_migration_ids( &self, - instance_id: Uuid, + instance_id: InstanceUuid, old_runtime: &InstanceRuntimeState, migration_ids: &Option, ) -> Result { @@ -235,7 +237,7 @@ impl InstanceManager { pub async fn instance_issue_disk_snapshot_request( &self, - instance_id: Uuid, + instance_id: InstanceUuid, disk_id: Uuid, snapshot_id: Uuid, ) -> Result<(), Error> { @@ -272,7 +274,7 @@ impl InstanceManager { pub async fn add_external_ip( &self, - instance_id: Uuid, + instance_id: InstanceUuid, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); @@ -290,7 +292,7 @@ impl InstanceManager { pub async fn delete_external_ip( &self, - instance_id: Uuid, + instance_id: InstanceUuid, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); @@ -313,7 +315,7 @@ impl InstanceManager { pub async fn get_instance_state( &self, - instance_id: Uuid, + instance_id: InstanceUuid, ) -> Result { let (tx, rx) = oneshot::channel(); self.inner @@ -334,8 +336,8 @@ impl InstanceManager { #[derive(strum::Display)] enum InstanceManagerRequest { EnsureRegistered { - instance_id: Uuid, - propolis_id: Uuid, + instance_id: InstanceUuid, + propolis_id: PropolisUuid, hardware: InstanceHardware, instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, @@ -344,22 +346,22 @@ enum InstanceManagerRequest { tx: oneshot::Sender>, }, EnsureUnregistered { - instance_id: Uuid, + instance_id: InstanceUuid, tx: oneshot::Sender>, }, EnsureState { - instance_id: Uuid, + instance_id: InstanceUuid, target: InstanceStateRequested, tx: oneshot::Sender>, }, PutMigrationIds { - instance_id: Uuid, + instance_id: InstanceUuid, old_runtime: InstanceRuntimeState, migration_ids: Option, tx: oneshot::Sender>, }, InstanceIssueDiskSnapshot { - instance_id: Uuid, + instance_id: InstanceUuid, disk_id: Uuid, snapshot_id: Uuid, tx: oneshot::Sender>, @@ -369,17 +371,17 @@ enum InstanceManagerRequest { tx: oneshot::Sender>, }, InstanceAddExternalIp { - instance_id: Uuid, + instance_id: InstanceUuid, ip: InstanceExternalIpBody, tx: oneshot::Sender>, }, InstanceDeleteExternalIp { - instance_id: Uuid, + instance_id: InstanceUuid, ip: InstanceExternalIpBody, tx: oneshot::Sender>, }, GetState { - instance_id: Uuid, + instance_id: InstanceUuid, tx: oneshot::Sender>, }, } @@ -387,7 +389,7 @@ enum InstanceManagerRequest { // Requests that the instance manager stop processing information about a // particular instance. struct InstanceDeregisterRequest { - id: Uuid, + id: InstanceUuid, } struct InstanceManagerRunner { @@ -414,7 +416,7 @@ struct InstanceManagerRunner { // instance, we could avoid the methods within "instance.rs" that panic // if the Propolis client hasn't been initialized. /// A mapping from a Sled Agent "Instance ID" to ("Propolis ID", [Instance]). 
- instances: BTreeMap, + instances: BTreeMap, vnic_allocator: VnicAllocator, port_manager: PortManager, @@ -511,7 +513,7 @@ impl InstanceManagerRunner { } } - fn get_instance(&self, instance_id: Uuid) -> Option<&Instance> { + fn get_instance(&self, instance_id: InstanceUuid) -> Option<&Instance> { self.instances.get(&instance_id).map(|(_id, v)| v) } @@ -535,8 +537,8 @@ #[allow(clippy::too_many_arguments)] pub async fn ensure_registered( &mut self, - instance_id: Uuid, - propolis_id: Uuid, + instance_id: InstanceUuid, + propolis_id: PropolisUuid, hardware: InstanceHardware, instance_runtime: InstanceRuntimeState, vmm_runtime: VmmRuntimeState, @@ -625,7 +627,7 @@ async fn ensure_unregistered( &mut self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, ) -> Result<(), Error> { // If the instance does not exist, we respond immediately. let Some(instance) = self.get_instance(instance_id) else { @@ -645,7 +647,7 @@ async fn ensure_state( &mut self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, target: InstanceStateRequested, ) -> Result<(), Error> { let Some(instance) = self.get_instance(instance_id) else { @@ -680,7 +682,7 @@ async fn put_migration_ids( &mut self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, old_runtime: &InstanceRuntimeState, migration_ids: &Option, ) -> Result<(), Error> { @@ -697,7 +699,7 @@ async fn instance_issue_disk_snapshot_request( &self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, disk_id: Uuid, snapshot_id: Uuid, ) -> Result<(), Error> { @@ -734,7 +736,7 @@ async fn add_external_ip( &self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { let Some(instance) = self.get_instance(instance_id) else { @@ -747,7 +749,7 @@ async fn delete_external_ip( &self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, ip: &InstanceExternalIpBody, ) -> Result<(), Error> { let Some(instance) = self.get_instance(instance_id) else { @@ -761,7 +763,7 @@ async fn get_instance_state( &self, tx: oneshot::Sender>, - instance_id: Uuid, + instance_id: InstanceUuid, ) -> Result<(), Error> { let Some(instance) = self.get_instance(instance_id) else { return tx @@ -777,7 +779,7 @@ /// Represents membership of an instance in the [`InstanceManager`]. pub struct InstanceTicket { - id: Uuid, + id: InstanceUuid, terminate_tx: Option>, } @@ -785,14 +787,14 @@ impl InstanceTicket { // Creates a new instance ticket for instance "id" to be removed // from the manager on destruction.
diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs
index 2ed7ca5528..9575e08c17 100644
--- a/sled-agent/src/params.rs
+++ b/sled-agent/src/params.rs
@@ -19,6 +19,7 @@ use omicron_common::api::internal::nexus::{
 use omicron_common::api::internal::shared::{
     NetworkInterface, SourceNatConfig,
 };
+use omicron_uuid_kinds::PropolisUuid;
 use omicron_uuid_kinds::ZpoolUuid;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
@@ -117,7 +118,7 @@ pub struct InstanceEnsureBody {
     /// The ID of the VMM being registered. This may not be the active VMM ID in
     /// the instance runtime state (e.g. if the new VMM is going to be a
     /// migration target).
-    pub propolis_id: Uuid,
+    pub propolis_id: PropolisUuid,

     /// The address at which this VMM should serve a Propolis server API.
     pub propolis_addr: SocketAddr,
@@ -214,7 +215,7 @@ impl InstanceStateRequested {
 #[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)]
 pub struct InstanceMigrationSourceParams {
     pub migration_id: Uuid,
-    pub dst_propolis_id: Uuid,
+    pub dst_propolis_id: PropolisUuid,
 }

 /// The body of a request to set or clear the migration identifiers from a
diff --git a/sled-agent/src/sim/collection.rs b/sled-agent/src/sim/collection.rs
index 12e17cc3de..8af71ac026 100644
--- a/sled-agent/src/sim/collection.rs
+++ b/sled-agent/src/sim/collection.rs
@@ -427,12 +427,12 @@ mod test {
     use omicron_common::api::internal::nexus::VmmRuntimeState;
     use omicron_common::api::internal::nexus::VmmState;
     use omicron_test_utils::dev::test_setup_log;
-    use uuid::Uuid;
+    use omicron_uuid_kinds::PropolisUuid;

     fn make_instance(
         logctx: &LogContext,
     ) -> (SimObject, Receiver<()>) {
-        let propolis_id = Uuid::new_v4();
+        let propolis_id = PropolisUuid::new_v4();
         let instance_vmm = InstanceRuntimeState {
             propolis_id: Some(propolis_id),
             dst_propolis_id: None,
diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs
index ae1318a8b1..012889c664 100644
--- a/sled-agent/src/sim/http_entrypoints.rs
+++ b/sled-agent/src/sim/http_entrypoints.rs
@@ -25,6 +25,7 @@ use omicron_common::api::internal::nexus::DiskRuntimeState;
 use omicron_common::api::internal::nexus::SledInstanceState;
 use omicron_common::api::internal::nexus::UpdateArtifactId;
 use omicron_common::api::internal::shared::SwitchPorts;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use sled_storage::resources::DisksManagementResult;
@@ -77,7 +78,7 @@ pub fn api() -> SledApiDescription {
 /// Path parameters for Instance requests (sled agent API)
 #[derive(Deserialize, JsonSchema)]
 struct InstancePathParam {
-    instance_id: Uuid,
+    instance_id: InstanceUuid,
 }

 #[endpoint {
@@ -309,7 +310,7 @@ async fn instance_issue_disk_snapshot_request(
     let body = body.into_inner();

     sa.instance_issue_disk_snapshot_request(
-        path_params.instance_id,
+        InstanceUuid::from_untyped_uuid(path_params.instance_id),
         path_params.disk_id,
         body.snapshot_id,
     )
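Note the idiom in the snapshot endpoint above: an ID that arrives untyped (here, from a path parameter struct that still uses a bare `Uuid`) is tagged with `from_untyped_uuid` as early as possible, and `into_untyped_uuid` strips the tag only where an untyped API still has to be called. A small round-trip sketch, assuming the omicron workspace:

```rust
use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
use uuid::Uuid;

fn main() {
    // An untyped ID, e.g. freshly deserialized from a request path.
    let raw: Uuid = Uuid::new_v4();

    // Tag it at the boundary...
    let typed = InstanceUuid::from_untyped_uuid(raw);

    // ...and strip the tag only when an untyped interface requires it.
    assert_eq!(typed.into_untyped_uuid(), raw);
}
```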
diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs
index 7f07dc199a..f47d8a9100 100644
--- a/sled-agent/src/sim/sled_agent.rs
+++ b/sled-agent/src/sim/sled_agent.rs
@@ -38,7 +38,7 @@ use omicron_common::api::internal::nexus::{
 };
 use omicron_common::api::internal::shared::RackNetworkConfig;
 use omicron_common::disk::DiskIdentity;
-use omicron_uuid_kinds::ZpoolUuid;
+use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, ZpoolUuid};
 use oxnet::Ipv6Net;
 use propolis_client::{
     types::VolumeConstructionRequest, Client as PropolisClient,
@@ -253,8 +253,8 @@ impl SledAgent {
     /// (described by `target`).
     pub async fn instance_register(
         self: &Arc<Self>,
-        instance_id: Uuid,
-        propolis_id: Uuid,
+        instance_id: InstanceUuid,
+        propolis_id: PropolisUuid,
         hardware: InstanceHardware,
         instance_runtime: InstanceRuntimeState,
         vmm_runtime: VmmRuntimeState,
@@ -271,7 +271,9 @@ impl SledAgent {
         for disk in &hardware.disks {
             let initial_state = DiskRuntimeState {
-                disk_state: DiskState::Attached(instance_id),
+                disk_state: DiskState::Attached(
+                    instance_id.into_untyped_uuid(),
+                ),
                 gen: omicron_common::api::external::Generation::new(),
                 time_updated: chrono::Utc::now(),
             };
@@ -286,7 +288,9 @@ impl SledAgent {
                 .sim_ensure(
                     &id,
                     initial_state,
-                    Some(DiskStateRequested::Attached(instance_id)),
+                    Some(DiskStateRequested::Attached(
+                        instance_id.into_untyped_uuid(),
+                    )),
                 )
                 .await?;
             self.disks
@@ -303,9 +307,13 @@ impl SledAgent {
         // point to the correct address.
         let mock_lock = self.mock_propolis.lock().await;
         if let Some((_srv, client)) = mock_lock.as_ref() {
-            if !self.instances.contains_key(&instance_id).await {
+            if !self
+                .instances
+                .contains_key(&instance_id.into_untyped_uuid())
+                .await
+            {
                 let properties = propolis_client::types::InstanceProperties {
-                    id: propolis_id,
+                    id: propolis_id.into_untyped_uuid(),
                     name: hardware.properties.hostname.to_string(),
                     description: "sled-agent-sim created instance".to_string(),
                     image_id: Uuid::default(),
@@ -336,7 +344,7 @@ impl SledAgent {
         let instance_run_time_state = self
             .instances
             .sim_ensure(
-                &instance_id,
+                &instance_id.into_untyped_uuid(),
                 SledInstanceState {
                     instance_state: instance_runtime,
                     vmm_state: vmm_runtime,
@@ -360,32 +368,33 @@ impl SledAgent {
     /// not notified.
     pub async fn instance_unregister(
         self: &Arc<Self>,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> Result {
-        let instance =
-            match self.instances.sim_get_cloned_object(&instance_id).await {
-                Ok(instance) => instance,
-                Err(Error::ObjectNotFound { .. }) => {
-                    return Ok(InstanceUnregisterResponse {
-                        updated_runtime: None,
-                    })
-                }
-                Err(e) => return Err(e),
-            };
+        let instance = match self
+            .instances
+            .sim_get_cloned_object(&instance_id.into_untyped_uuid())
+            .await
+        {
+            Ok(instance) => instance,
+            Err(Error::ObjectNotFound { .. }) => {
+                return Ok(InstanceUnregisterResponse { updated_runtime: None })
+            }
+            Err(e) => return Err(e),
+        };

         self.detach_disks_from_instance(instance_id).await?;
         let response = InstanceUnregisterResponse {
             updated_runtime: Some(instance.terminate()),
         };
-        self.instances.sim_force_remove(instance_id).await;
+        self.instances.sim_force_remove(instance_id.into_untyped_uuid()).await;
         Ok(response)
     }

     /// Asks the supplied instance to transition to the requested state.
     pub async fn instance_ensure_state(
         self: &Arc<Self>,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         state: InstanceStateRequested,
     ) -> Result {
         if let Some(e) = self.instance_ensure_state_error.lock().await.as_ref()
@@ -393,23 +402,26 @@ impl SledAgent {
         {
             return Err(e.clone());
         }

-        let current =
-            match self.instances.sim_get_cloned_object(&instance_id).await {
-                Ok(i) => i.current().clone(),
-                Err(_) => match state {
-                    InstanceStateRequested::Stopped => {
-                        return Ok(InstancePutStateResponse {
-                            updated_runtime: None,
-                        });
-                    }
-                    _ => {
-                        return Err(Error::invalid_request(&format!(
-                            "instance {} not registered on sled",
-                            instance_id,
-                        )));
-                    }
-                },
-            };
+        let current = match self
+            .instances
+            .sim_get_cloned_object(&instance_id.into_untyped_uuid())
+            .await
+        {
+            Ok(i) => i.current().clone(),
+            Err(_) => match state {
+                InstanceStateRequested::Stopped => {
+                    return Ok(InstancePutStateResponse {
+                        updated_runtime: None,
+                    });
+                }
+                _ => {
+                    return Err(Error::invalid_request(&format!(
+                        "instance {} not registered on sled",
+                        instance_id,
+                    )));
+                }
+            },
+        };

         let mock_lock = self.mock_propolis.lock().await;
         if let Some((_srv, client)) = mock_lock.as_ref() {
@@ -427,7 +439,11 @@ impl SledAgent {
             tokio::spawn(async move {
                 tokio::time::sleep(Duration::from_secs(10)).await;
                 match instances
-                    .sim_ensure(&instance_id, current, Some(state))
+                    .sim_ensure(
+                        &instance_id.into_untyped_uuid(),
+                        current,
+                        Some(state),
+                    )
                     .await
                 {
                     Ok(state) => {
@@ -457,7 +473,7 @@ impl SledAgent {

         let new_state = self
             .instances
-            .sim_ensure(&instance_id, current, Some(state))
+            .sim_ensure(&instance_id.into_untyped_uuid(), current, Some(state))
             .await?;

         // If this request will shut down the simulated instance, look for any
@@ -471,11 +487,11 @@ impl SledAgent {

     pub async fn instance_get_state(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> Result {
         let instance = self
             .instances
-            .sim_get_cloned_object(&instance_id)
+            .sim_get_cloned_object(&instance_id.into_untyped_uuid())
             .await
             .map_err(|_| {
                 crate::sled_agent::Error::Instance(
@@ -491,13 +507,13 @@ impl SledAgent {

     async fn detach_disks_from_instance(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> Result<(), Error> {
         self.disks
             .sim_ensure_for_each_where(
                 |disk| match disk.current().disk_state {
                     DiskState::Attached(id) | DiskState::Attaching(id) => {
-                        id == instance_id
+                        id == instance_id.into_untyped_uuid()
                     }
                     _ => false,
                 },
@@ -510,12 +526,14 @@ impl SledAgent {

     pub async fn instance_put_migration_ids(
         self: &Arc<Self>,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         old_runtime: &InstanceRuntimeState,
         migration_ids: &Option<InstanceMigrationSourceParams>,
     ) -> Result {
-        let instance =
-            self.instances.sim_get_cloned_object(&instance_id).await?;
+        let instance = self
+            .instances
+            .sim_get_cloned_object(&instance_id.into_untyped_uuid())
+            .await?;

         instance.put_migration_ids(old_runtime, migration_ids).await
     }
@@ -544,8 +562,8 @@ impl SledAgent {
         self.disks.size().await
     }

-    pub async fn instance_poke(&self, id: Uuid) {
-        self.instances.sim_poke(id, PokeMode::Drain).await;
+    pub async fn instance_poke(&self, id: InstanceUuid) {
+        self.instances.sim_poke(id.into_untyped_uuid(), PokeMode::Drain).await;
     }

     pub async fn disk_poke(&self, id: Uuid) {
@@ -628,7 +646,7 @@ impl SledAgent {
     /// snapshot here.
     pub async fn instance_issue_disk_snapshot_request(
         &self,
-        _instance_id: Uuid,
+        _instance_id: InstanceUuid,
         disk_id: Uuid,
         snapshot_id: Uuid,
     ) -> Result<(), Error> {
@@ -689,17 +707,18 @@ impl SledAgent {

     pub async fn instance_put_external_ip(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         body_args: &InstanceExternalIpBody,
     ) -> Result<(), Error> {
-        if !self.instances.contains_key(&instance_id).await {
+        if !self.instances.contains_key(&instance_id.into_untyped_uuid()).await
+        {
             return Err(Error::internal_error(
                 "can't alter IP state for nonexistent instance",
             ));
         }

         let mut eips = self.external_ips.lock().await;
-        let my_eips = eips.entry(instance_id).or_default();
+        let my_eips = eips.entry(instance_id.into_untyped_uuid()).or_default();

         // High-level behaviour: this should always succeed UNLESS
         // trying to add a double ephemeral.
@@ -722,17 +741,18 @@ impl SledAgent {

     pub async fn instance_delete_external_ip(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         body_args: &InstanceExternalIpBody,
     ) -> Result<(), Error> {
-        if !self.instances.contains_key(&instance_id).await {
+        if !self.instances.contains_key(&instance_id.into_untyped_uuid()).await
+        {
             return Err(Error::internal_error(
                 "can't alter IP state for nonexistent instance",
             ));
         }

         let mut eips = self.external_ips.lock().await;
-        let my_eips = eips.entry(instance_id).or_default();
+        let my_eips = eips.entry(instance_id.into_untyped_uuid()).or_default();

         my_eips.remove(&body_args);
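The simulated collections (`self.instances`, `self.disks`, `self.external_ips`) are still keyed by bare `Uuid`, which is why nearly every call above now ends in `.into_untyped_uuid()`. One way those call sites could shrink in a follow-up is a thin typed facade that converts once at the edge. This is a hypothetical sketch with invented names, not code from this patch:

```rust
use std::collections::HashMap;

use omicron_uuid_kinds::{GenericUuid, InstanceUuid};
use uuid::Uuid;

// Stand-in for a simulated collection that is still keyed by bare `Uuid`.
struct UntypedStore {
    objects: HashMap<Uuid, String>,
}

// Hypothetical wrapper: the only place the tag is stripped.
struct TypedStore(UntypedStore);

impl TypedStore {
    fn get(&self, id: InstanceUuid) -> Option<&String> {
        self.0.objects.get(&id.into_untyped_uuid())
    }
}

fn main() {
    let id = InstanceUuid::new_v4();
    let mut objects = HashMap::new();
    objects.insert(id.into_untyped_uuid(), "instance state".to_string());

    let store = TypedStore(UntypedStore { objects });
    assert!(store.get(id).is_some());
}
```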
diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs
index dac2a4cb48..5077120fdd 100644
--- a/sled-agent/src/sim/storage.rs
+++ b/sled-agent/src/sim/storage.rs
@@ -21,6 +21,7 @@ use dropshot::HttpError;
 use futures::lock::Mutex;
 use omicron_common::disk::DiskIdentity;
 use omicron_uuid_kinds::GenericUuid;
+use omicron_uuid_kinds::InstanceUuid;
 use omicron_uuid_kinds::OmicronZoneUuid;
 use omicron_uuid_kinds::ZpoolUuid;
 use propolis_client::types::VolumeConstructionRequest;
@@ -868,7 +869,7 @@ impl Pantry {
         self.sled_agent
             .instance_issue_disk_snapshot_request(
-                Uuid::new_v4(), // instance id, not used by function
+                InstanceUuid::new_v4(), // instance id, not used by function
                 volume_id.parse().unwrap(),
                 snapshot_id.parse().unwrap(),
             )
diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs
index 670d486686..993e5f6a94 100644
--- a/sled-agent/src/sled_agent.rs
+++ b/sled-agent/src/sled_agent.rs
@@ -59,6 +59,7 @@ use omicron_common::backoff::{
     retry_notify, retry_policy_internal_service_aggressive, BackoffError,
 };
 use omicron_ddm_admin_client::Client as DdmAdminClient;
+use omicron_uuid_kinds::{InstanceUuid, PropolisUuid};
 use oximeter::types::ProducerRegistry;
 use sled_hardware::{underlay, HardwareManager};
 use sled_hardware_types::underlay::BootstrapInterface;
@@ -882,8 +883,8 @@ impl SledAgent {
     #[allow(clippy::too_many_arguments)]
     pub async fn instance_ensure_registered(
         &self,
-        instance_id: Uuid,
-        propolis_id: Uuid,
+        instance_id: InstanceUuid,
+        propolis_id: PropolisUuid,
         hardware: InstanceHardware,
         instance_runtime: InstanceRuntimeState,
         vmm_runtime: VmmRuntimeState,
@@ -912,7 +913,7 @@ impl SledAgent {
     /// rudely terminates the instance.
     pub async fn instance_ensure_unregistered(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> Result {
         self.inner
             .instances
@@ -925,7 +926,7 @@ impl SledAgent {
     /// state.
     pub async fn instance_ensure_state(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         target: InstanceStateRequested,
     ) -> Result {
         self.inner
@@ -941,7 +942,7 @@ impl SledAgent {
     /// [`crate::params::InstancePutMigrationIdsBody`].
     pub async fn instance_put_migration_ids(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         old_runtime: &InstanceRuntimeState,
         migration_ids: &Option<InstanceMigrationSourceParams>,
     ) -> Result {
@@ -959,7 +960,7 @@ impl SledAgent {
     /// does not match the current ephemeral IP.
     pub async fn instance_put_external_ip(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         external_ip: &InstanceExternalIpBody,
     ) -> Result<(), Error> {
         self.inner
@@ -973,7 +974,7 @@ impl SledAgent {
     /// specified external IP address in either its ephemeral or floating IP set.
     pub async fn instance_delete_external_ip(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         external_ip: &InstanceExternalIpBody,
     ) -> Result<(), Error> {
         self.inner
@@ -986,7 +987,7 @@ impl SledAgent {
     /// Returns the state of the instance with the provided ID.
     pub async fn instance_get_state(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
     ) -> Result {
         self.inner
             .instances
@@ -1023,7 +1024,7 @@ impl SledAgent {
     /// Issue a snapshot request for a Crucible disk attached to an instance
     pub async fn instance_issue_disk_snapshot_request(
         &self,
-        instance_id: Uuid,
+        instance_id: InstanceUuid,
         disk_id: Uuid,
         snapshot_id: Uuid,
     ) -> Result<(), Error> {
diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs
index 2fc08972a6..430b3f7f9f 100644
--- a/uuid-kinds/src/lib.rs
+++ b/uuid-kinds/src/lib.rs
@@ -57,6 +57,7 @@ impl_typed_uuid_kind! {
     LoopbackAddress => "loopback_address",
     OmicronZone => "service",
     PhysicalDisk => "physical_disk",
+    Propolis => "propolis",
     Sled => "sled",
     TufRepo => "tuf_repo",
     Upstairs => "upstairs",
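Finally, the new `Propolis => "propolis"` entry registers the kind with `impl_typed_uuid_kind!`, which is what makes `PropolisUuid` available everywhere above. The sketch below is not the macro's actual expansion, just a self-contained illustration of the general pattern such a macro can generate: a zero-sized kind type plus one generic wrapper, so each kind gets a distinct UUID type with shared machinery (assumes the `uuid` crate with the `v4` feature):

```rust
use std::marker::PhantomData;

use uuid::Uuid;

// Each kind is a zero-sized type carrying a stable tag.
trait UuidKind {
    const TAG: &'static str;
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct PropolisKind;

impl UuidKind for PropolisKind {
    const TAG: &'static str = "propolis";
}

// One generic wrapper shared by every kind; `PhantomData` keeps the kind
// in the type without storing anything at runtime.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct TypedUuid<K: UuidKind> {
    inner: Uuid,
    _kind: PhantomData<K>,
}

impl<K: UuidKind> TypedUuid<K> {
    fn new_v4() -> Self {
        Self { inner: Uuid::new_v4(), _kind: PhantomData }
    }
}

type PropolisUuid = TypedUuid<PropolisKind>;

fn main() {
    let id = PropolisUuid::new_v4();
    println!("kind = {}, id = {}", PropolisKind::TAG, id.inner);
}
```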