diff --git a/Cargo.lock b/Cargo.lock index 95c18ecdddb..d33e6c94022 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6869,7 +6869,10 @@ name = "nexus-reconfigurator-planning" version = "0.1.0" dependencies = [ "anyhow", + "camino", + "camino-tempfile", "chrono", + "clap", "clickhouse-admin-types", "cockroach-admin-types", "daft", @@ -6909,8 +6912,12 @@ dependencies = [ "swrite", "test-strategy", "thiserror 2.0.16", + "tokio", + "tufaceous", "tufaceous-artifact", + "tufaceous-lib", "typed-rng", + "update-common", "uuid", ] diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 06db0c99da0..6f35bfd7529 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -5,7 +5,7 @@ //! developer REPL for driving blueprint planning use anyhow::{Context, anyhow, bail, ensure}; -use camino::{Utf8Path, Utf8PathBuf}; +use camino::Utf8PathBuf; use chrono::{DateTime, Utc}; use clap::{ArgAction, ValueEnum}; use clap::{Args, Parser, Subcommand}; @@ -20,7 +20,9 @@ use nexus_inventory::CollectionBuilder; use nexus_reconfigurator_blippy::Blippy; use nexus_reconfigurator_blippy::BlippyReportSortKey; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; -use nexus_reconfigurator_planning::example::ExampleSystemBuilder; +use nexus_reconfigurator_planning::example::{ + ExampleSystemBuilder, extract_tuf_repo_description, tuf_assemble, +}; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_planning::system::{ RotStateOverrides, SledBuilder, SledInventoryVisibility, SystemDescription, @@ -48,8 +50,8 @@ use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::inventory::CollectionDisplayCliFilter; use omicron_common::address::REPO_DEPOT_PORT; +use omicron_common::api::external::Generation; use omicron_common::api::external::Name; -use omicron_common::api::external::{Generation, TufRepoDescription}; use omicron_common::disk::M2Slot; use omicron_common::policy::NEXUS_REDUNDANCY; use omicron_common::update::OmicronZoneManifestSource; @@ -75,20 +77,9 @@ use tufaceous_artifact::ArtifactHash; use tufaceous_artifact::ArtifactVersion; use tufaceous_artifact::ArtifactVersionError; use tufaceous_lib::assemble::ArtifactManifest; -use update_common::artifacts::{ - ArtifactsWithPlan, ControlPlaneZonesMode, VerificationMode, -}; mod log_capture; -/// The default key for TUF repository generation. -/// -/// This was randomly generated through a tufaceous invocation. 
-pub static DEFAULT_TUFACEOUS_KEY: &str = "ed25519:\ -MFECAQEwBQYDK2VwBCIEIJ9CnAhwk8PPt1x8icu\ -z9c12PdfCRHJpoUkuqJmIZ8GbgSEAbNGMpsHK5_w32\ -qwYdZH_BeVssmKzQlFsnPuaiHx2hy0="; - /// REPL state #[derive(Debug)] struct ReconfiguratorSim { @@ -2840,32 +2831,6 @@ fn mupdate_source_to_description( } } -fn extract_tuf_repo_description( - log: &slog::Logger, - filename: &Utf8Path, -) -> anyhow::Result { - let file = std::fs::File::open(filename) - .with_context(|| format!("open {:?}", filename))?; - let buf = std::io::BufReader::new(file); - let rt = - tokio::runtime::Runtime::new().context("creating tokio runtime")?; - let repo_hash = ArtifactHash([0; 32]); - let artifacts_with_plan = rt.block_on(async { - ArtifactsWithPlan::from_zip( - buf, - None, - repo_hash, - ControlPlaneZonesMode::Split, - VerificationMode::BlindlyTrustAnything, - log, - ) - .await - .with_context(|| format!("unpacking {:?}", filename)) - })?; - let description = artifacts_with_plan.description().clone(); - Ok(description) -} - fn cmd_tuf_assemble( sim: &ReconfiguratorSim, args: TufAssembleArgs, @@ -2896,30 +2861,12 @@ fn cmd_tuf_assemble( Utf8PathBuf::from(format!("repo-{}.zip", manifest.system_version)) }; - if output_path.exists() { - bail!("output path `{output_path}` already exists"); - } - - // Just use a fixed key for now. - // - // In the future we may want to test changing the TUF key. - let mut tufaceous_args = vec![ - "tufaceous", - "--key", - DEFAULT_TUFACEOUS_KEY, - "assemble", - manifest_path.as_str(), - output_path.as_str(), - ]; - if args.allow_non_semver { - tufaceous_args.push("--allow-non-semver"); - } - let args = tufaceous::Args::try_parse_from(tufaceous_args) - .expect("args are valid so this shouldn't fail"); - let rt = - tokio::runtime::Runtime::new().context("creating tokio runtime")?; - rt.block_on(async move { args.exec(&sim.log).await }) - .context("error executing tufaceous assemble")?; + tuf_assemble( + &sim.log, + &manifest_path, + &output_path, + args.allow_non_semver, + )?; let rv = format!( "created {} for system version {}", diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index cb441c9fde1..0fd3625db71 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -8,6 +8,9 @@ workspace = true [dependencies] anyhow.workspace = true +camino.workspace = true +camino-tempfile.workspace = true +clap.workspace = true clickhouse-admin-types.workspace = true cockroach-admin-types.workspace = true chrono.workspace = true @@ -41,8 +44,12 @@ static_assertions.workspace = true strum.workspace = true swrite.workspace = true thiserror.workspace = true +tokio.workspace = true +tufaceous.workspace = true tufaceous-artifact.workspace = true +tufaceous-lib.workspace = true typed-rng.workspace = true +update-common.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 919ab594ad1..1f0b6c788d5 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -4,6 +4,7 @@ //! 
Example blueprints +use std::collections::HashMap; use std::fmt; use std::hash::Hash; use std::net::IpAddr; @@ -11,23 +12,40 @@ use std::net::Ipv4Addr; use crate::blueprint_builder::BlueprintBuilder; use crate::planner::rng::PlannerRng; +use crate::system::RotStateOverrides; use crate::system::SledBuilder; use crate::system::SystemDescription; +use anyhow::Context; use anyhow::bail; +use camino::Utf8Path; +use camino_tempfile::Utf8TempDir; +use clap::Parser; use nexus_inventory::CollectionBuilderRng; +use nexus_sled_agent_shared::inventory::ZoneKind; use nexus_types::deployment::Blueprint; -use nexus_types::deployment::BlueprintZoneImageSource; +use nexus_types::deployment::BlueprintArtifactVersion; +use nexus_types::deployment::BlueprintHostPhase2DesiredContents; +use nexus_types::deployment::BlueprintHostPhase2DesiredSlots; +use nexus_types::deployment::ExpectedVersion; use nexus_types::deployment::OmicronZoneNic; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; +use nexus_types::deployment::TargetReleaseDescription; use nexus_types::external_api::views::SledPolicy; use nexus_types::inventory::Collection; +use omicron_common::api::external::TufRepoDescription; use omicron_common::policy::CRUCIBLE_PANTRY_REDUNDANCY; use omicron_common::policy::INTERNAL_DNS_REDUNDANCY; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledKind; use omicron_uuid_kinds::VnicUuid; +use tufaceous_artifact::ArtifactHash; +use tufaceous_artifact::ArtifactKind; +use tufaceous_artifact::KnownArtifactKind; use typed_rng::TypedUuidRng; +use update_common::artifacts::ArtifactsWithPlan; +use update_common::artifacts::ControlPlaneZonesMode; +use update_common::artifacts::VerificationMode; /// Stateful PRNG for generating simulated systems. /// @@ -190,6 +208,7 @@ pub struct ExampleSystemBuilder { crucible_pantry_count: ZoneCount, create_zones: bool, create_disks_in_blueprint: bool, + target_release: TargetReleaseDescription, } impl ExampleSystemBuilder { @@ -225,6 +244,7 @@ impl ExampleSystemBuilder { crucible_pantry_count: ZoneCount(CRUCIBLE_PANTRY_REDUNDANCY), create_zones: true, create_disks_in_blueprint: true, + target_release: TargetReleaseDescription::Initial, } } @@ -350,6 +370,46 @@ impl ExampleSystemBuilder { Ok(self) } + /// Set the target release to an initial `0.0.1` version, and image sources to + /// Artifact corresponding to the release. + pub fn with_target_release_0_0_1(self) -> anyhow::Result { + // Find the 0.0.1 release relative to this crate's root directory. + let root_dir = Utf8Path::new(env!("CARGO_MANIFEST_DIR")); + let manifest_path = root_dir + .parent() + .unwrap() + .parent() + .unwrap() + .parent() + .unwrap() + .join("update-common/manifests/fake-0.0.1.toml"); + self.with_target_release_manifest( + &manifest_path, + // allow_non_semver is false because fake-0.0.1.toml doesn't contain + // non-semver artifacts. 
+ false, + ) + } + + pub fn with_target_release_manifest( + mut self, + manifest_path: &Utf8Path, + allow_non_semver: bool, + ) -> anyhow::Result { + let dir = Utf8TempDir::with_prefix("reconfigurator-planning-example") + .context("failed to create temp dir")?; + let zip_path = dir.path().join("repo.zip"); + tuf_assemble(&self.log, manifest_path, &zip_path, allow_non_semver) + .context("failed to assemble TUF repo")?; + + let target_release = extract_tuf_repo_description(&self.log, &zip_path) + .context("failed to extract TUF repo description")?; + + self.target_release = TargetReleaseDescription::TufRepo(target_release); + + Ok(self) + } + fn get_nexus_zones(&self) -> ZoneCount { self.nexus_count.unwrap_or(ZoneCount(self.sled_settings.len())) } @@ -390,12 +450,40 @@ impl ExampleSystemBuilder { .target_nexus_zone_count(nexus_count.0) .target_internal_dns_zone_count(self.internal_dns_count.0) .target_crucible_pantry_zone_count(self.crucible_pantry_count.0); + + // Set the target release if one is available. We don't do this + // unconditionally because we don't want the target release generation + // number to be incremented if self.target_release is + // TargetReleaseDescription::Initial. + if let TargetReleaseDescription::TufRepo(_) = &self.target_release { + system.set_target_release(self.target_release.clone()); + } + let sled_ids_with_settings: Vec<_> = self .sled_settings .iter() .map(|settings| (rng.sled_rng.next(), settings)) .collect(); + let artifacts_by_kind = if let TargetReleaseDescription::TufRepo(repo) = + &self.target_release + { + // Build a map of artifact versions by kind. For the artifacts we + // care about, we currently only expect a single version, so this + // map is expected to be unique. + // + // TODO-correctness Need to choose gimlet vs cosmo here! Can we use + // update-common's UpdatePlan instead of this jank? + // https://github.com/oxidecomputer/omicron/issues/8777 + let mut artifacts_by_kind = HashMap::new(); + for artifact in &repo.artifacts { + artifacts_by_kind.insert(&artifact.id.kind, artifact); + } + Some(artifacts_by_kind) + } else { + None + }; + for (sled_id, settings) in &sled_ids_with_settings { let _ = system .sled( @@ -450,7 +538,7 @@ impl ExampleSystemBuilder { ))) .expect( "this shouldn't error because provided external IPs \ - are all unique", + are all unique", ); } @@ -469,9 +557,13 @@ impl ExampleSystemBuilder { .unwrap(); } if self.create_zones { - let image_source = BlueprintZoneImageSource::InstallDataset; let _ = builder - .sled_ensure_zone_ntp(sled_id, image_source.clone()) + .sled_ensure_zone_ntp( + sled_id, + self.target_release + .zone_image_source(ZoneKind::BoundaryNtp) + .expect("obtained BoundaryNtp image source"), + ) .unwrap(); // Create discretionary zones if allowed. 
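
The `zone_image_source` calls in this hunk and the following ones lean on `TargetReleaseDescription::zone_image_source` (defined in nexus-types, not shown in this diff). A minimal sketch of the behavior the builder assumes — the `zone_artifact_name` helper and the exact artifact-matching rule below are illustrative assumptions, not the real implementation:

```rust
use anyhow::anyhow;
use nexus_sled_agent_shared::inventory::ZoneKind;
use nexus_types::deployment::BlueprintArtifactVersion;
use nexus_types::deployment::BlueprintZoneImageSource;
use nexus_types::deployment::TargetReleaseDescription;

/// Illustrative only: pick a zone image source for `zone_kind` given the
/// configured target release.
fn image_source_for(
    target_release: &TargetReleaseDescription,
    zone_kind: ZoneKind,
) -> anyhow::Result<BlueprintZoneImageSource> {
    match target_release {
        // No target release set: zones run out of the install dataset.
        TargetReleaseDescription::Initial => {
            Ok(BlueprintZoneImageSource::InstallDataset)
        }
        // A TUF repo is set: point the blueprint at the matching zone
        // artifact by version and hash.
        TargetReleaseDescription::TufRepo(repo) => {
            let wanted = zone_artifact_name(zone_kind);
            let artifact = repo
                .artifacts
                .iter()
                .find(|meta| meta.id.name == wanted)
                .ok_or_else(|| anyhow!("no `{wanted}` artifact in repo"))?;
            Ok(BlueprintZoneImageSource::Artifact {
                version: BlueprintArtifactVersion::Available {
                    version: artifact.id.version.clone(),
                },
                hash: artifact.hash,
            })
        }
    }
}

/// Hypothetical kind-to-name mapping, mirroring the zone names used in
/// fake-0.0.1.toml below; the real lookup lives in nexus-types.
fn zone_artifact_name(zone_kind: ZoneKind) -> &'static str {
    match zone_kind {
        ZoneKind::Nexus => "nexus",
        ZoneKind::CruciblePantry => "crucible-pantry-zone",
        ZoneKind::InternalDns => "internal-dns",
        ZoneKind::ExternalDns => "external-dns",
        _ => "unknown",
    }
}
```
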
@@ -484,7 +576,9 @@ impl ExampleSystemBuilder { sled_id, false, vec![], - image_source.clone(), + self.target_release + .zone_image_source(ZoneKind::Nexus) + .expect("obtained Nexus image source"), initial_blueprint.nexus_generation, ) .unwrap(); @@ -493,7 +587,9 @@ impl ExampleSystemBuilder { builder .sled_add_zone_clickhouse( sled_id, - image_source.clone(), + self.target_release + .zone_image_source(ZoneKind::Clickhouse) + .expect("obtained Clickhouse image source"), ) .unwrap(); } @@ -504,7 +600,11 @@ impl ExampleSystemBuilder { builder .sled_add_zone_internal_dns( sled_id, - image_source.clone(), + self.target_release + .zone_image_source(ZoneKind::InternalDns) + .expect( + "obtained InternalDNS image source", + ), ) .unwrap(); } @@ -515,7 +615,11 @@ impl ExampleSystemBuilder { builder .sled_add_zone_external_dns( sled_id, - image_source.clone(), + self.target_release + .zone_image_source(ZoneKind::ExternalDns) + .expect( + "obtained ExternalDNS image source", + ), ) .unwrap(); } @@ -526,7 +630,11 @@ impl ExampleSystemBuilder { builder .sled_add_zone_crucible_pantry( sled_id, - image_source.clone(), + self.target_release + .zone_image_source(ZoneKind::CruciblePantry) + .expect( + "obtained CruciblePantry image source", + ), ) .unwrap(); } @@ -538,12 +646,51 @@ impl ExampleSystemBuilder { .sled_ensure_zone_crucible( sled_id, *pool_name, - image_source.clone(), + self.target_release + .zone_image_source(ZoneKind::Crucible) + .expect("obtained Crucible image source"), ) .unwrap(); } } builder.sled_ensure_zone_datasets(sled_id).unwrap(); + + if let Some(artifacts_by_kind) = &artifacts_by_kind { + // Set the host phase 2 artifact version to Artifact to avoid a + // noop conversion in the first planning run. + let host_phase_2_artifact = + artifacts_by_kind.get(&ArtifactKind::HOST_PHASE_2).unwrap(); + + builder + .sled_set_host_phase_2( + sled_id, + BlueprintHostPhase2DesiredSlots { + slot_a: + BlueprintHostPhase2DesiredContents::Artifact { + version: + BlueprintArtifactVersion::Available { + version: host_phase_2_artifact + .id + .version + .clone(), + }, + hash: host_phase_2_artifact.hash, + }, + slot_b: + BlueprintHostPhase2DesiredContents::Artifact { + version: + BlueprintArtifactVersion::Available { + version: host_phase_2_artifact + .id + .version + .clone(), + }, + hash: host_phase_2_artifact.hash, + }, + }, + ) + .expect("sled is present in blueprint"); + }; } let blueprint = builder.build(); @@ -592,6 +739,90 @@ impl ExampleSystemBuilder { } } + if let Some(artifacts_by_kind) = &artifacts_by_kind { + // Set all MGS and host phase 2 versions out of the TUF repo to + // ensure that the planner is quiesced at the time the initial + // system is returned. 
+ let sp_version = artifacts_by_kind + .get(&ArtifactKind::from(KnownArtifactKind::GimletSp)) + .unwrap() + .id + .version + .clone(); + let rot_a_version = artifacts_by_kind + .get(&ArtifactKind::GIMLET_ROT_IMAGE_A) + .unwrap() + .id + .version + .clone(); + let rot_b_version = artifacts_by_kind + .get(&ArtifactKind::GIMLET_ROT_IMAGE_B) + .unwrap() + .id + .version + .clone(); + let host_phase_1_hash = artifacts_by_kind + .get(&ArtifactKind::HOST_PHASE_1) + .unwrap() + .hash; + let host_phase_2_hash = artifacts_by_kind + .get(&ArtifactKind::HOST_PHASE_2) + .unwrap() + .hash; + + for sled_id in blueprint.sleds.keys() { + system + .sled_update_sp_versions( + *sled_id, + Some(sp_version.clone()), + Some(ExpectedVersion::Version(sp_version.clone())), + ) + .expect("sled was just added to the system"); + system + .sled_update_rot_versions( + *sled_id, + RotStateOverrides { + active_slot_override: None, + slot_a_version_override: Some( + ExpectedVersion::Version(rot_a_version.clone()), + ), + slot_b_version_override: Some( + ExpectedVersion::Version(rot_b_version.clone()), + ), + persistent_boot_preference_override: None, + pending_persistent_boot_preference_override: None, + transient_boot_preference_override: None, + }, + ) + .expect("sled was just added to the system"); + + // TODO-correctness Need to choose gimlet vs cosmo here! Need help + // from tufaceous to tell us which is which. + // https://github.com/oxidecomputer/omicron/issues/8777 + system + .sled_update_host_phase_1_artifacts( + *sled_id, + None, + Some(host_phase_1_hash), + Some(host_phase_1_hash), + ) + .expect("sled was just added to the system"); + + // We must set host phase 2 artifacts after sled_set_omicron_config + // is called, because sled_set_omicron_config resets + // last_reconciliation state and wipes away host phase 2 artifact + // hashes. + system + .sled_update_host_phase_2_artifacts( + *sled_id, + None, + Some(host_phase_2_hash), + Some(host_phase_2_hash), + ) + .expect("sled was just added to the system"); + } + } + let mut builder = system.to_collection_builder().expect("failed to build collection"); builder.set_rng(rng.collection_rng); @@ -635,6 +866,77 @@ impl ZoneCount { } } +/// The default key for TUF repository generation. +/// +/// This was randomly generated through a tufaceous invocation. +pub static DEFAULT_TUFACEOUS_KEY: &str = "ed25519:\ +MFECAQEwBQYDK2VwBCIEIJ9CnAhwk8PPt1x8icu\ +z9c12PdfCRHJpoUkuqJmIZ8GbgSEAbNGMpsHK5_w32\ +qwYdZH_BeVssmKzQlFsnPuaiHx2hy0="; + +/// Construct a TUF repository zip file from a manifest, writing the TUF zip +/// file out to `output_path`. +pub fn tuf_assemble( + log: &slog::Logger, + manifest_path: &Utf8Path, + output_path: &Utf8Path, + allow_non_semver: bool, +) -> anyhow::Result<()> { + if output_path.exists() { + bail!("output path `{output_path}` already exists"); + } + + // Just use a fixed key for now. + // + // In the future we may want to test changing the TUF key. 
+ let mut tufaceous_args = vec![ + "tufaceous", + "--key", + DEFAULT_TUFACEOUS_KEY, + "assemble", + manifest_path.as_str(), + output_path.as_str(), + ]; + + if allow_non_semver { + tufaceous_args.push("--allow-non-semver"); + } + let args = tufaceous::Args::try_parse_from(tufaceous_args) + .expect("args are valid so this shouldn't fail"); + let rt = + tokio::runtime::Runtime::new().context("creating tokio runtime")?; + rt.block_on(async move { args.exec(log).await }) + .context("error executing tufaceous assemble")?; + + Ok(()) +} + +pub fn extract_tuf_repo_description( + log: &slog::Logger, + zip_path: &Utf8Path, +) -> anyhow::Result { + let file = std::fs::File::open(zip_path) + .with_context(|| format!("open {:?}", zip_path))?; + let buf = std::io::BufReader::new(file); + let rt = + tokio::runtime::Runtime::new().context("creating tokio runtime")?; + let repo_hash = ArtifactHash([0; 32]); + let artifacts_with_plan = rt.block_on(async { + ArtifactsWithPlan::from_zip( + buf, + None, + repo_hash, + ControlPlaneZonesMode::Split, + VerificationMode::BlindlyTrustAnything, + log, + ) + .await + .with_context(|| format!("unpacking {:?}", zip_path)) + })?; + let description = artifacts_with_plan.description().clone(); + Ok(description) +} + #[cfg(test)] mod tests { use chrono::{DateTime, Utc}; diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 72ec7b1718c..051fb64f2f9 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -5933,6 +5933,8 @@ pub(crate) mod test { &logctx.log, rng.next_system_rng(), ) + .with_target_release_0_0_1() + .expect("set target release to 0.0.1") .build(); verify_blueprint(&blueprint); @@ -5976,13 +5978,18 @@ pub(crate) mod test { TEST_NAME, ); - // All zones should be sourced from the install dataset by default. + // All zones should be sourced from the initial 0.0.1 target release by + // default. + eprintln!("{}", blueprint.display()); assert!( blueprint .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .all(|(_, z)| matches!( - z.image_source, - BlueprintZoneImageSource::InstallDataset + &z.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } )) ); @@ -6045,8 +6052,11 @@ pub(crate) mod test { let is_old_cockroach = |zone: &BlueprintZoneConfig| -> bool { zone.zone_type.is_cockroach() && matches!( - zone.image_source, - BlueprintZoneImageSource::InstallDataset + &zone.image_source, + BlueprintZoneImageSource::Artifact { version, hash: _ } + if version == &BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("0.0.1") + } ) }; let is_up_to_date_cockroach = |zone: &BlueprintZoneConfig| -> bool { diff --git a/update-common/manifests/fake-0.0.1.toml b/update-common/manifests/fake-0.0.1.toml new file mode 100644 index 00000000000..f7126835b03 --- /dev/null +++ b/update-common/manifests/fake-0.0.1.toml @@ -0,0 +1,94 @@ +# This is an artifact manifest that generates fake entries for all components. +# This is completely non-functional and is only useful for testing archive +# extraction in other parts of the repository. 
+
+system_version = "0.0.1"
+
+[[artifact.gimlet_sp]]
+name = "fake-gimlet-sp"
+version = "0.0.1"
+source = { kind = "fake", size = "1MiB" }
+
+[[artifact.gimlet_rot]]
+name = "fake-rot"
+version = "0.0.1"
+[artifact.gimlet_rot.source]
+kind = "composite-rot"
+archive_a = { kind = "fake", size = "512KiB" }
+archive_b = { kind = "fake", size = "512KiB" }
+
+[[artifact.host]]
+name = "fake-host"
+version = "0.0.1"
+[artifact.host.source]
+kind = "composite-host"
+phase_1 = { kind = "fake", size = "512KiB" }
+phase_2 = { kind = "fake", size = "1MiB" }
+
+[[artifact.trampoline]]
+name = "fake-trampoline"
+version = "0.0.1"
+[artifact.trampoline.source]
+kind = "composite-host"
+phase_1 = { kind = "fake", size = "512KiB" }
+phase_2 = { kind = "fake", size = "1MiB" }
+
+[[artifact.control_plane]]
+name = "fake-control-plane"
+version = "0.0.1"
+[artifact.control_plane.source]
+kind = "composite-control-plane"
+zones = [
+    { kind = "fake", artifact_name = "clickhouse", file_name = "clickhouse.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "clickhouse_keeper", file_name = "clickhouse_keeper.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "clickhouse_server", file_name = "clickhouse_server.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "cockroachdb", file_name = "cockroachdb.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "crucible-zone", file_name = "crucible.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "crucible-pantry-zone", file_name = "crucible_pantry.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "external-dns", file_name = "external_dns.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "internal-dns", file_name = "internal_dns.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "ntp", file_name = "ntp.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "nexus", file_name = "nexus.tar.gz", size = "128KiB" },
+    { kind = "fake", artifact_name = "oximeter", file_name = "oximeter.tar.gz", size = "128KiB" },
+]
+
+[[artifact.psc_sp]]
+name = "fake-psc-sp"
+version = "0.0.1"
+source = { kind = "fake", size = "1MiB" }
+
+[[artifact.psc_rot]]
+name = "fake-psc-rot"
+version = "0.0.1"
+[artifact.psc_rot.source]
+kind = "composite-rot"
+archive_a = { kind = "fake", size = "512KiB" }
+archive_b = { kind = "fake", size = "512KiB" }
+
+[[artifact.switch_sp]]
+name = "fake-switch-sp"
+version = "0.0.1"
+source = { kind = "fake", size = "1MiB" }
+
+[[artifact.switch_rot]]
+name = "fake-switch-rot"
+version = "0.0.1"
+[artifact.switch_rot.source]
+kind = "composite-rot"
+archive_a = { kind = "fake", size = "512KiB" }
+archive_b = { kind = "fake", size = "512KiB" }
+
+[[artifact.gimlet_rot_bootloader]]
+name = "fake-rot-bootloader"
+version = "0.0.1"
+source = { kind = "fake", size = "1MiB" }
+
+[[artifact.psc_rot_bootloader]]
+name = "fake-psc-rot-bootloader"
+version = "0.0.1"
+source = { kind = "fake", size = "1MiB" }
+
+[[artifact.switch_rot_bootloader]]
+name = "fake-switch-rot-bootloader"
+version = "0.0.1"
+source = { kind = "fake", size = "1MiB" }
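
For reference, a sketch of how the new builder hook might be exercised from a test, mirroring the assertion added to the planner tests above. The `ExampleSystemBuilder::new(log, test_name)` constructor and the `test_setup_log` helper are the usual omicron test scaffolding and are assumed here rather than shown in this diff:

```rust
use nexus_reconfigurator_planning::example::ExampleSystemBuilder;
use nexus_types::deployment::BlueprintArtifactVersion;
use nexus_types::deployment::BlueprintZoneDisposition;
use nexus_types::deployment::BlueprintZoneImageSource;
use omicron_test_utils::dev::test_setup_log;
use tufaceous_artifact::ArtifactVersion;

#[test]
fn example_system_with_0_0_1_target_release() {
    let logctx = test_setup_log("example_system_with_0_0_1_target_release");

    // Build an example system whose zones are sourced from the fake 0.0.1
    // TUF repo rather than the install dataset.
    let (_example, blueprint) =
        ExampleSystemBuilder::new(&logctx.log, "0_0_1_target_release")
            .with_target_release_0_0_1()
            .expect("assembled and applied fake 0.0.1 repo")
            .build();

    // Every in-service zone should report an Artifact image source at 0.0.1.
    let expected = BlueprintArtifactVersion::Available {
        version: ArtifactVersion::new_const("0.0.1"),
    };
    assert!(
        blueprint
            .all_omicron_zones(BlueprintZoneDisposition::is_in_service)
            .all(|(_, zone)| matches!(
                &zone.image_source,
                BlueprintZoneImageSource::Artifact { version, hash: _ }
                    if version == &expected
            ))
    );

    logctx.cleanup_successful();
}
```
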