Commit f63707a

[nexus] Add zpool to inventory (#5249)
This PR adds zpools to the inventory and updates the existing `omicron.public.zpool` table to include only the aspects of a zpool that are fully controlled by Nexus. It is intended to solve a problem introduced by #5172: in that PR, physical disk and zpool provisioning are decided by Nexus and sent to the Sled Agent as a request, so the `total_size` of a zpool is not known when the pool is provisioned. Here, we add zpools to the inventory and JOIN with that table to access a zpool's `total_size` when it is known. Additionally, this PR adds a test validating that the database (appropriately) returns "Insufficient capacity" errors if the zpool exists but has not yet appeared in the inventory.
1 parent cc59eff commit f63707a
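
For orientation, a rough sketch (not part of this commit) of how the two halves fit together after the change: Nexus records the zpool itself without a size, and the size arrives later through inventory. The types and calls follow the diffs below; `zpool_id`, `sled_id`, `physical_disk_id`, `collection_id`, and the exact `ByteCount` conversion are illustrative assumptions.

    // Hypothetical sketch: Nexus decides to provision a pool, size unknown.
    let zpool = Zpool::new(zpool_id, sled_id, physical_disk_id);
    datastore.zpool_upsert(zpool).await?;

    // Later, the sled agent reports the pool in inventory, including its size.
    let reported = nexus_types::inventory::Zpool {
        time_collected: Utc::now(),
        id: zpool_id,
        // 1 GiB for illustration; the conversion into ByteCount is assumed.
        total_size: ByteCount::try_from(1_u64 << 30).unwrap(),
    };
    // Inserted into inv_zpool during inventory collection insert (see the
    // datastore diff below).
    let inv_row = InvZpool::new(collection_id, sled_id, &reported);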

File tree: 29 files changed (+509, -78 lines)


nexus/db-model/src/inventory.rs

Lines changed: 39 additions & 1 deletion

@@ -9,7 +9,8 @@ use crate::schema::{
     hw_baseboard_id, inv_caboose, inv_collection, inv_collection_error,
     inv_omicron_zone, inv_omicron_zone_nic, inv_physical_disk,
     inv_root_of_trust, inv_root_of_trust_page, inv_service_processor,
-    inv_sled_agent, inv_sled_omicron_zones, sw_caboose, sw_root_of_trust_page,
+    inv_sled_agent, inv_sled_omicron_zones, inv_zpool, sw_caboose,
+    sw_root_of_trust_page,
 };
 use crate::PhysicalDiskKind;
 use crate::{
@@ -699,6 +700,43 @@ impl From<InvPhysicalDisk> for nexus_types::inventory::PhysicalDisk {
     }
 }
 
+/// See [`nexus_types::inventory::Zpool`].
+#[derive(Queryable, Clone, Debug, Selectable, Insertable)]
+#[diesel(table_name = inv_zpool)]
+pub struct InvZpool {
+    pub inv_collection_id: Uuid,
+    pub time_collected: DateTime<Utc>,
+    pub id: Uuid,
+    pub sled_id: Uuid,
+    pub total_size: ByteCount,
+}
+
+impl InvZpool {
+    pub fn new(
+        inv_collection_id: Uuid,
+        sled_id: Uuid,
+        zpool: &nexus_types::inventory::Zpool,
+    ) -> Self {
+        Self {
+            inv_collection_id,
+            time_collected: zpool.time_collected,
+            id: zpool.id,
+            sled_id,
+            total_size: zpool.total_size.into(),
+        }
+    }
+}
+
+impl From<InvZpool> for nexus_types::inventory::Zpool {
+    fn from(pool: InvZpool) -> Self {
+        Self {
+            time_collected: pool.time_collected,
+            id: pool.id,
+            total_size: *pool.total_size,
+        }
+    }
+}
+
 /// See [`nexus_types::inventory::OmicronZonesFound`].
 #[derive(Queryable, Clone, Debug, Selectable, Insertable)]
 #[diesel(table_name = inv_sled_omicron_zones)]
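
As a side note on the conversions above (again a sketch, reusing the illustrative names from the snippet near the top): `From<InvZpool>` intentionally drops `inv_collection_id` and `sled_id`, because the datastore read path shown later re-keys zpools by sled; only the collection timestamp, pool id, and size survive the round trip.

    let row = InvZpool::new(collection_id, sled_id, &reported);
    let back: nexus_types::inventory::Zpool = row.into();
    assert_eq!(back.id, reported.id);
    assert_eq!(back.total_size, reported.total_size);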

nexus/db-model/src/schema.rs

Lines changed: 11 additions & 3 deletions

@@ -13,7 +13,7 @@ use omicron_common::api::external::SemverVersion;
 ///
 /// This should be updated whenever the schema is changed. For more details,
 /// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(43, 0, 0);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(44, 0, 0);
 
 table! {
     disk (id) {
@@ -960,8 +960,6 @@ table! {
 
         sled_id -> Uuid,
         physical_disk_id -> Uuid,
-
-        total_size -> Int8,
     }
 }
 
@@ -1375,6 +1373,16 @@
     }
 }
 
+table! {
+    inv_zpool (inv_collection_id, sled_id, id) {
+        inv_collection_id -> Uuid,
+        time_collected -> Timestamptz,
+        id -> Uuid,
+        sled_id -> Uuid,
+        total_size -> Int8,
+    }
+}
+
 table! {
     inv_sled_omicron_zones (inv_collection_id, sled_id) {
         inv_collection_id -> Uuid,

nexus/db-model/src/zpool.rs

Lines changed: 2 additions & 12 deletions

@@ -2,7 +2,7 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
 
-use super::{ByteCount, Dataset, Generation};
+use super::{Dataset, Generation};
 use crate::collection::DatastoreCollectionConfig;
 use crate::schema::{dataset, zpool};
 use chrono::{DateTime, Utc};
@@ -26,26 +26,16 @@ pub struct Zpool {
 
     // The physical disk to which this Zpool is attached.
    pub physical_disk_id: Uuid,
-
-    // TODO: In the future, we may expand this structure to include
-    // size, allocation, and health information.
-    pub total_size: ByteCount,
 }
 
 impl Zpool {
-    pub fn new(
-        id: Uuid,
-        sled_id: Uuid,
-        physical_disk_id: Uuid,
-        total_size: ByteCount,
-    ) -> Self {
+    pub fn new(id: Uuid, sled_id: Uuid, physical_disk_id: Uuid) -> Self {
         Self {
             identity: ZpoolIdentity::new(id),
             time_deleted: None,
             rcgen: Generation::new(),
             sled_id,
             physical_disk_id,
-            total_size,
         }
     }
 }

nexus/db-queries/src/db/datastore/dataset.rs

Lines changed: 1 addition & 6 deletions

@@ -229,12 +229,7 @@ mod test {
 
         // Create a fake zpool that backs our fake datasets.
         let zpool_id = Uuid::new_v4();
-        let zpool = Zpool::new(
-            zpool_id,
-            sled_id,
-            Uuid::new_v4(),
-            (1 << 30).try_into().unwrap(),
-        );
+        let zpool = Zpool::new(zpool_id, sled_id, Uuid::new_v4());
         datastore.zpool_upsert(zpool).await.expect("failed to upsert zpool");
 
         // Inserting a new dataset should succeed.

nexus/db-queries/src/db/datastore/inventory.rs

Lines changed: 64 additions & 0 deletions

@@ -45,6 +45,7 @@ use nexus_db_model::InvRotPage;
 use nexus_db_model::InvServiceProcessor;
 use nexus_db_model::InvSledAgent;
 use nexus_db_model::InvSledOmicronZones;
+use nexus_db_model::InvZpool;
 use nexus_db_model::RotPageWhichEnum;
 use nexus_db_model::SledRole;
 use nexus_db_model::SledRoleEnum;
@@ -137,6 +138,18 @@ impl DataStore {
             })
             .collect();
 
+        // Pull zpools out of all sled agents
+        let zpools: Vec<_> = collection
+            .sled_agents
+            .iter()
+            .flat_map(|(sled_id, sled_agent)| {
+                sled_agent
+                    .zpools
+                    .iter()
+                    .map(|pool| InvZpool::new(collection_id, *sled_id, pool))
+            })
+            .collect();
+
         // Partition the sled agents into those with an associated baseboard id
         // and those without one. We handle these pretty differently.
         let (sled_agents_baseboards, sled_agents_no_baseboards): (
@@ -670,6 +683,25 @@ impl DataStore {
                 }
             }
 
+            // Insert rows for all the zpools we found.
+            {
+                use db::schema::inv_zpool::dsl;
+
+                let batch_size = SQL_BATCH_SIZE.get().try_into().unwrap();
+                let mut zpools = zpools.into_iter();
+                loop {
+                    let some_zpools =
+                        zpools.by_ref().take(batch_size).collect::<Vec<_>>();
+                    if some_zpools.is_empty() {
+                        break;
+                    }
+                    let _ = diesel::insert_into(dsl::inv_zpool)
+                        .values(some_zpools)
+                        .execute_async(&conn)
+                        .await?;
+                }
+            }
+
             // Insert rows for the sled agents that we found. In practice, we'd
             // expect these to all have baseboards (if using Oxide hardware) or
             // none have baseboards (if not).
@@ -1470,6 +1502,34 @@
             disks
         };
 
+        // Mapping of "Sled ID" -> "All zpools reported by that sled"
+        let zpools: BTreeMap<Uuid, Vec<nexus_types::inventory::Zpool>> = {
+            use db::schema::inv_zpool::dsl;
+
+            let mut zpools =
+                BTreeMap::<Uuid, Vec<nexus_types::inventory::Zpool>>::new();
+            let mut paginator = Paginator::new(batch_size);
+            while let Some(p) = paginator.next() {
+                let batch = paginated_multicolumn(
+                    dsl::inv_zpool,
+                    (dsl::sled_id, dsl::id),
+                    &p.current_pagparams(),
+                )
+                .filter(dsl::inv_collection_id.eq(id))
+                .select(InvZpool::as_select())
+                .load_async(&*conn)
+                .await
+                .map_err(|e| {
+                    public_error_from_diesel(e, ErrorHandler::Server)
+                })?;
+                paginator = p.found_batch(&batch, &|row| (row.sled_id, row.id));
+                for zpool in batch {
+                    zpools.entry(zpool.sled_id).or_default().push(zpool.into());
+                }
+            }
+            zpools
+        };
+
         // Collect the unique baseboard ids referenced by SPs, RoTs, and Sled
         // Agents.
         let baseboard_id_ids: BTreeSet<_> = sps
@@ -1577,6 +1637,10 @@
                         .get(&sled_id)
                         .map(|disks| disks.to_vec())
                         .unwrap_or_default(),
+                    zpools: zpools
+                        .get(&sled_id)
+                        .map(|zpools| zpools.to_vec())
+                        .unwrap_or_default(),
                 };
                 Ok((sled_id, sled_agent))
             })
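
With both the write and read paths in place, loading a collection yields per-sled zpools next to the physical disks. A minimal sketch of consuming that data (not from the commit; `to_bytes()` on `ByteCount` is an assumption):

    // Sketch: summing reported zpool capacity per sled from a loaded collection.
    for (sled_id, sled_agent) in &collection.sled_agents {
        let total_bytes: u64 =
            sled_agent.zpools.iter().map(|z| z.total_size.to_bytes()).sum();
        println!(
            "sled {sled_id}: {} zpools, {total_bytes} bytes reported",
            sled_agent.zpools.len()
        );
    }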
