2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

10 changes: 8 additions & 2 deletions dev-tools/omdb/src/bin/omdb/db/ereport.rs
@@ -155,7 +155,10 @@ async fn cmd_db_ereport_list(
restart_id: restart_id.into_untyped_uuid(),
ena: ena.into(),
class: class.clone(),
source: db::model::Reporter::Sp { sp_type, slot: sp_slot.0 },
source: db::model::Reporter::Sp {
sp_type: sp_type.into(),
slot: sp_slot.0,
},
serial: serial_number.as_deref(),
part_number: part_number.as_deref(),
}
@@ -547,7 +550,10 @@ async fn cmd_db_ereporters(
)| {
ReporterRow {
first_seen,
identity: db::model::Reporter::Sp { slot: slot.0, sp_type },
identity: db::model::Reporter::Sp {
slot: slot.0,
sp_type: sp_type.into(),
},
serial,
part_number,
id: restart_id,
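Throughout this PR, the omdb and db-model call sites now build `Reporter::Sp` with `sp_type: sp_type.into()`, converting the Diesel-backed `db::model::SpType` into `nexus_types::inventory::SpType`. The real conversion lives elsewhere in the tree; a minimal sketch of what such an impl generally looks like, assuming both enums share the same three variants:

```rust
// Sketch only, not the actual impl: illustrates the `.into()` conversion the
// call sites above rely on, assuming matching variant names on both enums.
impl From<db::model::SpType> for nexus_types::inventory::SpType {
    fn from(value: db::model::SpType) -> Self {
        match value {
            db::model::SpType::Sled => Self::Sled,
            db::model::SpType::Switch => Self::Switch,
            db::model::SpType::Power => Self::Power,
        }
    }
}
```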
23 changes: 23 additions & 0 deletions dev-tools/omdb/src/bin/omdb/nexus.rs
@@ -66,6 +66,7 @@ use nexus_types::internal_api::background::RegionSnapshotReplacementStartStatus;
use nexus_types::internal_api::background::RegionSnapshotReplacementStepStatus;
use nexus_types::internal_api::background::SupportBundleCleanupReport;
use nexus_types::internal_api::background::SupportBundleCollectionReport;
use nexus_types::internal_api::background::SupportBundleEreportStatus;
use nexus_types::internal_api::background::TufArtifactReplicationCounters;
use nexus_types::internal_api::background::TufArtifactReplicationRequest;
use nexus_types::internal_api::background::TufArtifactReplicationStatus;
@@ -2414,6 +2415,8 @@ fn print_task_support_bundle_collector(details: &serde_json::Value) {
listed_in_service_sleds,
listed_sps,
activated_in_db_ok,
sp_ereports,
host_ereports,
}) = collection_report
{
println!(" Support Bundle Collection Report:");
@@ -2427,6 +2430,26 @@
println!(
" Bundle was activated in the database: {activated_in_db_ok}"
);
print_ereport_status("SP", &sp_ereports);
print_ereport_status("Host OS", &host_ereports);
}
}
}

fn print_ereport_status(which: &str, status: &SupportBundleEreportStatus) {
match status {
SupportBundleEreportStatus::NotRequested => {
println!(" {which} ereport collection was not requested");
}
SupportBundleEreportStatus::Failed { error, n_collected } => {
println!(" {which} ereport collection failed:");
println!(
" ereports collected successfully: {n_collected}"
);
println!(" error: {error}");
}
SupportBundleEreportStatus::Collected { n_collected } => {
println!(" {which} ereports collected: {n_collected}");
}
}
}
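The new `print_ereport_status` helper matches on `SupportBundleEreportStatus` from `nexus_types::internal_api::background` (imported above). A rough sketch of the enum's shape as implied by those match arms; the concrete field types here are assumptions, not the authoritative definition:

```rust
// Sketch only: variant and field names come from the match arms above;
// the field types are assumed.
pub enum SupportBundleEreportStatus {
    /// Ereport collection was not requested for this bundle.
    NotRequested,
    /// Collection failed; `n_collected` ereports were gathered before the error.
    Failed { error: String, n_collected: usize },
    /// Collection succeeded with `n_collected` ereports.
    Collected { n_collected: usize },
}
```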
2 changes: 1 addition & 1 deletion nexus/Cargo.toml
@@ -107,7 +107,7 @@ tempfile.workspace = true
thiserror.workspace = true
tokio = { workspace = true, features = ["full"] }
tokio-postgres = { workspace = true, features = ["with-serde_json-1"] }
tokio-util = { workspace = true, features = ["codec"] }
tokio-util = { workspace = true, features = ["codec", "rt"] }
tough.workspace = true
tufaceous-artifact.workspace = true
usdt.workspace = true
37 changes: 29 additions & 8 deletions nexus/db-model/src/ereport.rs
@@ -63,11 +63,14 @@
}
}

#[derive(Clone, Debug)]
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct Ereport {
#[serde(flatten)]
pub id: EreportId,
#[serde(flatten)]
pub metadata: EreportMetadata,
pub reporter: Reporter,
#[serde(flatten)]
pub report: serde_json::Value,
}
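The new serde derives plus `#[serde(flatten)]` mean an `Ereport` serializes as a single flat JSON object: the ID, metadata, and report fields are lifted to the top level rather than nested under `id`/`metadata`/`report` keys. A self-contained illustration with simplified stand-in types (not the real ereport fields):

```rust
use serde::Serialize;

// Stand-in types for illustration only.
#[derive(Serialize)]
struct Id {
    restart_id: u64,
    ena: u64,
}

#[derive(Serialize)]
struct Flattened {
    #[serde(flatten)]
    id: Id,
    class: String,
}

fn main() {
    let value = serde_json::to_value(Flattened {
        id: Id { restart_id: 1, ena: 2 },
        class: "demo".into(),
    })
    .unwrap();
    // The `Id` fields appear at the top level, not under an "id" key.
    assert_eq!(
        value,
        serde_json::json!({ "restart_id": 1, "ena": 2, "class": "demo" })
    );
}
```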

@@ -96,7 +99,7 @@ impl From<SpEreport> for Ereport {
serial_number,
class,
},
reporter: Reporter::Sp { sp_type, slot: sp_slot.0 },
reporter: Reporter::Sp { sp_type: sp_type.into(), slot: sp_slot.0 },
report,
}
}
@@ -131,7 +134,7 @@ impl From<HostEreport> for Ereport {
}
}

#[derive(Clone, Debug)]
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct EreportMetadata {
pub time_collected: DateTime<Utc>,
pub time_deleted: Option<DateTime<Utc>>,
@@ -141,22 +144,40 @@ pub struct EreportMetadata {
pub class: Option<String>,
}

#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
#[derive(
Clone,
Debug,
Eq,
PartialEq,
Ord,
PartialOrd,
serde::Serialize,
serde::Deserialize,
)]
pub enum Reporter {
Sp { sp_type: SpType, slot: u16 },
Sp { sp_type: nexus_types::inventory::SpType, slot: u16 },
HostOs { sled: SledUuid },
}

impl std::fmt::Display for Reporter {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Sp { sp_type: SpType::Sled, slot } => {
Self::Sp {
sp_type: nexus_types::inventory::SpType::Sled,
slot,
} => {
write!(f, "Sled (SP) {slot:02}")
}
Self::Sp { sp_type: SpType::Switch, slot } => {
Self::Sp {
sp_type: nexus_types::inventory::SpType::Switch,
slot,
} => {
write!(f, "Switch {slot}")
}
Self::Sp { sp_type: SpType::Power, slot } => {
Self::Sp {
sp_type: nexus_types::inventory::SpType::Power,
slot,
} => {
write!(f, "PSC {slot}")
}
Self::HostOs { sled } => {
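A quick illustration of the `Display` arms above, assuming `Reporter` (defined above) and `nexus_types::inventory::SpType` are importable as in this crate; note that only sled slots are zero-padded (via `{slot:02}`):

```rust
// Sketch only: exercises the Display impl shown in the diff above.
#[test]
fn reporter_display_examples() {
    use nexus_types::inventory::SpType;

    let sled = Reporter::Sp { sp_type: SpType::Sled, slot: 7 };
    let psc = Reporter::Sp { sp_type: SpType::Power, slot: 1 };
    assert_eq!(sled.to_string(), "Sled (SP) 07"); // zero-padded per `{slot:02}`
    assert_eq!(psc.to_string(), "PSC 1");
}
```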
169 changes: 166 additions & 3 deletions nexus/db-queries/src/db/datastore/ereport.rs
@@ -17,7 +17,7 @@ use crate::db::model::SpMgsSlot;
use crate::db::model::SpType;
use crate::db::model::SqlU16;
use crate::db::model::SqlU32;
use crate::db::pagination::paginated;
use crate::db::pagination::{paginated, paginated_multicolumn};
use async_bb8_diesel::AsyncRunQueryDsl;
use chrono::DateTime;
use chrono::Utc;
@@ -48,6 +48,44 @@ pub struct EreporterRestartBySerial {
pub ereports: u32,
}

/// A set of filters for fetching ereports.
#[derive(Clone, Debug, Default)]
pub struct EreportFilters {
/// If present, include only ereports that were collected at the specified
/// timestamp or later.
///
/// If `end_time` is also present, this value *must* be earlier than
/// `end_time`.
pub start_time: Option<DateTime<Utc>>,
/// If present, include only ereports that were collected at the specified
/// timestamp or before.
///
/// If `start_time` is also present, this value *must* be later than
/// `start_time`.
pub end_time: Option<DateTime<Utc>>,
/// If this list is non-empty, include only ereports that were reported by
/// systems with the provided serial numbers.
pub only_serials: Vec<String>,
/// If this list is non-empty, include only ereports with the provided class
/// strings.
// TODO(eliza): globbing could be nice to add here eventually...
pub only_classes: Vec<String>,
}

impl EreportFilters {
fn check_time_range(&self) -> Result<(), Error> {
if let (Some(start), Some(end)) = (self.start_time, self.end_time) {
if start > end {
return Err(Error::invalid_request(
"start time must be before end time",
));
}
}

Ok(())
}
}
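A hypothetical construction of these filters (the serial number and one-hour window are made-up values): fields left unset fall back to the `Default` derive, i.e. no end time and no class restriction, and the time-range check is enforced by the fetch methods added below.

```rust
fn example_filters() -> EreportFilters {
    // Sketch only: restrict to the last hour of ereports from one system.
    EreportFilters {
        start_time: Some(Utc::now() - chrono::Duration::hours(1)),
        only_serials: vec!["BRM42220031".to_string()],
        // No end time, no class restriction: these come from `Default`.
        ..Default::default()
    }
}
```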

impl DataStore {
/// Fetch an ereport by its restart ID and ENA.
///
@@ -93,6 +131,90 @@ impl DataStore {
Err(Error::non_resourcetype_not_found(format!("ereport {id}")))
}

pub async fn host_ereports_fetch_matching(
&self,
opctx: &OpContext,
filters: &EreportFilters,
pagparams: &DataPageParams<'_, (Uuid, DbEna)>,
) -> ListResultVec<HostEreport> {
opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
filters.check_time_range()?;

let mut query = paginated_multicolumn(
host_dsl::host_ereport,
(host_dsl::restart_id, host_dsl::ena),
pagparams,
)
.filter(host_dsl::time_deleted.is_null())
.select(HostEreport::as_select());

if let Some(start) = filters.start_time {
query = query.filter(host_dsl::time_collected.ge(start));
}

if let Some(end) = filters.end_time {
query = query.filter(host_dsl::time_collected.le(end));
}

if !filters.only_serials.is_empty() {
query = query.filter(
host_dsl::sled_serial.eq_any(filters.only_serials.clone()),
);
}

if !filters.only_classes.is_empty() {
query = query
.filter(host_dsl::class.eq_any(filters.only_classes.clone()));
}

query
.load_async(&*self.pool_connection_authorized(opctx).await?)
.await
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
}

pub async fn sp_ereports_fetch_matching(
&self,
opctx: &OpContext,
filters: &EreportFilters,
pagparams: &DataPageParams<'_, (Uuid, DbEna)>,
) -> ListResultVec<SpEreport> {
opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
filters.check_time_range()?;

let mut query = paginated_multicolumn(
sp_dsl::sp_ereport,
(sp_dsl::restart_id, sp_dsl::ena),
pagparams,
)
.filter(sp_dsl::time_deleted.is_null())
.select(SpEreport::as_select());

if let Some(start) = filters.start_time {
query = query.filter(sp_dsl::time_collected.ge(start));
}

if let Some(end) = filters.end_time {
query = query.filter(sp_dsl::time_collected.le(end));
}

if !filters.only_serials.is_empty() {
query = query.filter(
sp_dsl::serial_number.eq_any(filters.only_serials.clone()),
);
}

if !filters.only_classes.is_empty() {
query = query
.filter(sp_dsl::class.eq_any(filters.only_classes.clone()));
}

query
.load_async(&*self.pool_connection_authorized(opctx).await?)
.await
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
}
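Both fetch methods page on the compound `(restart_id, ena)` cursor via `paginated_multicolumn`. A hypothetical first-page fetch, assuming `opctx`, `datastore`, and `filters` already exist and that `DataPageParams` has its usual `omicron_common::api::external` shape:

```rust
// Sketch only: fetch the first 100 matching SP ereports in ascending
// (restart_id, ena) order; a follow-up page would pass the last row's
// (restart_id, ena) pair back in as the `marker`.
let pagparams: DataPageParams<'_, (Uuid, DbEna)> = DataPageParams {
    marker: None,
    direction: dropshot::PaginationOrder::Ascending,
    limit: std::num::NonZeroU32::new(100).unwrap(),
};
let page = datastore
    .sp_ereports_fetch_matching(&opctx, &filters, &pagparams)
    .await?;
println!("fetched {} SP ereports", page.len());
```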

/// List ereports from the SP with the given restart ID.
pub async fn sp_ereport_list_by_restart(
&self,
@@ -144,7 +266,7 @@ impl DataStore {
EreporterRestartBySerial {
id: EreporterRestartUuid::from_untyped_uuid(restart_id),
reporter_kind: Reporter::Sp {
sp_type,
sp_type: sp_type.into(),
slot: sp_slot.into(),
},
first_seen_at: first_seen.expect(FIRST_SEEN_NOT_NULL),
@@ -261,8 +383,20 @@
sled_id: SledUuid,
) -> Result<Option<ereport_types::EreportId>, Error> {
opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
self.host_latest_ereport_id_on_conn(
&*self.pool_connection_authorized(opctx).await?,
sled_id,
)
.await
}

async fn host_latest_ereport_id_on_conn(
&self,
conn: &async_bb8_diesel::Connection<DbConnection>,
sled_id: SledUuid,
) -> Result<Option<ereport_types::EreportId>, Error> {
let id = Self::host_latest_ereport_id_query(sled_id)
.get_result_async(&*self.pool_connection_authorized(opctx).await?)
.get_result_async(conn)
.await
.optional()
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?
@@ -314,6 +448,35 @@ impl DataStore {
})?;
Ok((created, latest))
}

pub async fn host_ereports_insert(
&self,
opctx: &OpContext,
sled_id: SledUuid,
ereports: Vec<HostEreport>,
) -> CreateResult<(usize, Option<ereport_types::EreportId>)> {
opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?;
let conn = self.pool_connection_authorized(opctx).await?;
let created = diesel::insert_into(host_dsl::host_ereport)
.values(ereports)
.on_conflict((host_dsl::restart_id, host_dsl::ena))
.do_nothing()
.execute_async(&*conn)
.await
.map_err(|e| {
public_error_from_diesel(e, ErrorHandler::Server)
.internal_context("failed to insert ereports")
})?;
let latest = self
.host_latest_ereport_id_on_conn(&conn, sled_id)
.await
.map_err(|e| {
e.internal_context(format!(
"failed to refresh latest ereport ID for {sled_id}"
))
})?;
Ok((created, latest))
}
}
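The new `host_ereports_insert` mirrors the existing SP insert path: `ON CONFLICT (restart_id, ena) DO NOTHING` makes re-delivery of the same ereports a no-op, and the returned pair carries the count of newly inserted rows alongside the refreshed latest ereport ID for the sled. A hypothetical caller, with `datastore`, `opctx`, `sled_id`, and a prepared `Vec<HostEreport>` batch assumed to exist:

```rust
// Sketch only: duplicate (restart_id, ena) pairs are silently skipped, so
// retrying a delivery does not double-insert.
let (inserted, latest) = datastore
    .host_ereports_insert(&opctx, sled_id, ereports)
    .await?;
println!("inserted {inserted} ereports; latest ID for {sled_id} is now {latest:?}");
// Re-delivering the same batch later would conflict on every row and report
// zero newly inserted rows.
```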

fn id_from_tuple(
1 change: 1 addition & 0 deletions nexus/db-queries/src/db/datastore/mod.rs
@@ -121,6 +121,7 @@ mod zpool;
pub use address_lot::AddressLotCreateResult;
pub use dns::DataStoreDnsTest;
pub use dns::DnsVersionUpdateBuilder;
pub use ereport::EreportFilters;
pub use instance::{
InstanceAndActiveVmm, InstanceGestalt, InstanceStateComputer,
};