Commit ba7be2a

feat(database): Add metadata table for upgradability and avoiding database switchups (#387)
- closes #357

Introduce the `metadata` table, where we track the currently used database schema. This will later allow us to implement "migrations", i.e. updating the database schema without requiring the user to resync. Furthermore, when we create the database, we store the domain type that was used, to avoid working with a database that exists but was previously used on another network. The existing `block` table is removed and its column is moved to `metadata`, so that there is only one table holding data that exists only once.
1 parent ceae577 commit ba7be2a

16 files changed: +518 −53 lines

Cargo.lock

Lines changed: 1 addition & 0 deletions
(Generated file; diff not rendered.)

anchor/client/src/lib.rs

Lines changed: 8 additions & 3 deletions
@@ -187,9 +187,14 @@ impl Client {
             NetworkDatabase::new_as_impostor(
                 &config.global_config.data_dir.database_file(),
                 impostor,
+                config.global_config.ssv_network.ssv_domain_type,
             )
         } else {
-            NetworkDatabase::new(&config.global_config.data_dir.database_file(), &pubkey)
+            NetworkDatabase::new(
+                &config.global_config.data_dir.database_file(),
+                &pubkey,
+                config.global_config.ssv_network.ssv_domain_type,
+            )
         }
         .map_err(|e| format!("Unable to open Anchor database: {e}"))?,
     );
@@ -429,7 +434,7 @@ impl Client {
         let signature_collector = SignatureCollectorManager::new(
             processor_senders.clone(),
             operator_id.clone(),
-            config.global_config.ssv_network.ssv_domain_type.clone(),
+            config.global_config.ssv_network.ssv_domain_type,
             message_sender.clone(),
             slot_clock.clone(),
         )
@@ -441,7 +446,7 @@ impl Client {
             operator_id.clone(),
             slot_clock.clone(),
             message_sender,
-            config.global_config.ssv_network.ssv_domain_type.clone(),
+            config.global_config.ssv_network.ssv_domain_type,
         )
         .map_err(|e| format!("Unable to initialize qbft manager: {e:?}"))?;

anchor/common/ssv_types/src/domain_type.rs

Lines changed: 22 additions & 1 deletion
@@ -1,6 +1,11 @@
 use std::str::FromStr;
 
-#[derive(Clone, Debug, Default, PartialEq)]
+use rusqlite::{
+    ToSql,
+    types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, Value, ValueRef},
+};
+
+#[derive(Clone, Copy, Debug, Default, PartialEq)]
 pub struct DomainType(pub [u8; 4]);
 
 impl FromStr for DomainType {
@@ -28,3 +33,19 @@ impl From<[u8; 4]> for DomainType {
         Self(bytes)
     }
 }
+
+impl FromSql for DomainType {
+    fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
+        let value = value.as_i64()?;
+        let value = u32::try_from(value).map_err(|_| FromSqlError::InvalidType)?;
+        Ok(value.to_le_bytes().into())
+    }
+}
+
+impl ToSql for DomainType {
+    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
+        Ok(ToSqlOutput::Owned(Value::Integer(
+            u32::from_le_bytes(self.0).into(),
+        )))
+    }
+}
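The two impls above store a `DomainType` as a single SQLite integer by interpreting its four bytes as a little-endian `u32`. A minimal sketch of the round trip (not part of this commit; the table name and domain bytes are made up for illustration):

```rust
use rusqlite::Connection;
use ssv_types::domain_type::DomainType;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE t (domain_type INTEGER NOT NULL);")?;

    // ToSql: the stored integer is u32::from_le_bytes(domain.0).
    let domain = DomainType([0x00, 0x00, 0x30, 0x12]); // hypothetical domain bytes
    conn.execute("INSERT INTO t (domain_type) VALUES (?1)", [&domain])?;

    // FromSql: the integer is converted back via u32::to_le_bytes.
    let read: DomainType = conn.query_row("SELECT domain_type FROM t", [], |row| row.get(0))?;
    assert_eq!(read, domain);
    Ok(())
}
```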

anchor/database/src/lib.rs

Lines changed: 20 additions & 29 deletions
@@ -1,6 +1,5 @@
 use std::{
     collections::{HashMap, HashSet},
-    fs::File,
     path::Path,
     time::Duration,
 };
@@ -10,7 +9,10 @@ use once_cell::sync::OnceCell;
 use openssl::{pkey::Public, rsa::Rsa};
 use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::{Transaction, params};
-use ssv_types::{Cluster, ClusterId, CommitteeId, Operator, OperatorId, Share, ValidatorMetadata};
+use ssv_types::{
+    Cluster, ClusterId, CommitteeId, Operator, OperatorId, Share, ValidatorMetadata,
+    domain_type::DomainType,
+};
 use tokio::sync::{
     watch,
     watch::{Receiver, Ref},
@@ -76,6 +78,7 @@ mod cluster_operations;
 mod error;
 mod keysplit_operations;
 mod operator_operations;
+mod schema;
 mod share_operations;
 mod sql_operations;
 mod state;
@@ -140,8 +143,12 @@ pub struct NetworkDatabase {
 
 impl NetworkDatabase {
     /// Construct a new NetworkDatabase at the given path and the Public Key of the current operator
-    pub fn new(path: &Path, pubkey: &Rsa<Public>) -> Result<Self, DatabaseError> {
-        let conn_pool = Self::open_or_create(path)?;
+    pub fn new(
+        path: &Path,
+        pubkey: &Rsa<Public>,
+        domain: DomainType,
+    ) -> Result<Self, DatabaseError> {
+        let conn_pool = Self::open_or_create(path, domain)?;
         let operator = PubkeyOrId::Pubkey(pubkey.clone());
         let state = watch::Sender::new(NetworkState::new_with_state(&conn_pool, &operator)?);
         Ok(Self {
@@ -152,8 +159,12 @@ impl NetworkDatabase {
     }
 
     /// Act as if we had the pubkey of a certain operator
-    pub fn new_as_impostor(path: &Path, operator: &OperatorId) -> Result<Self, DatabaseError> {
-        let conn_pool = Self::open_or_create(path)?;
+    pub fn new_as_impostor(
+        path: &Path,
+        operator: &OperatorId,
+        domain: DomainType,
+    ) -> Result<Self, DatabaseError> {
+        let conn_pool = Self::open_or_create(path, domain)?;
         let operator = PubkeyOrId::Id(*operator);
         let state = watch::Sender::new(NetworkState::new_with_state(&conn_pool, &operator)?);
         Ok(Self {
@@ -186,12 +197,9 @@ impl NetworkDatabase {
     }
 
     // Open an existing database at the given `path`, or create one if none exists.
-    fn open_or_create(path: &Path) -> Result<Pool, DatabaseError> {
-        if path.exists() {
-            Self::open_conn_pool(path)
-        } else {
-            Self::create(path)
-        }
+    fn open_or_create(path: &Path, domain: DomainType) -> Result<Pool, DatabaseError> {
+        schema::ensure_up_to_date(path, domain)?;
+        Self::open_conn_pool(path)
     }
 
     // Build a new connection pool
@@ -205,23 +213,6 @@ impl NetworkDatabase {
         Ok(conn_pool)
     }
 
-    // Create a database at the given path.
-    fn create(path: &Path) -> Result<Pool, DatabaseError> {
-        let _file = File::options()
-            .write(true)
-            .read(true)
-            .create_new(true)
-            .open(path)?;
-
-        // restrict file permissions
-        let conn_pool = Self::open_conn_pool(path)?;
-        let conn = conn_pool.get()?;
-
-        // create all of the tables
-        conn.execute_batch(include_str!("table_schema.sql"))?;
-        Ok(conn_pool)
-    }
-
     // Open a new connection
     pub fn connection(&self) -> Result<PoolConn, DatabaseError> {
         Ok(self.conn_pool.get()?)
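`open_or_create` now funnels every open through `schema::ensure_up_to_date`, which creates the schema on first use and refuses an existing file that was recorded for another network. A rough sketch of that behaviour from inside the database crate, e.g. in a test (not part of this commit; the domain byte values and the path handling are placeholders):

```rust
use ssv_types::domain_type::DomainType;

use crate::schema;

fn reopen_with_wrong_domain(path: &std::path::Path) {
    let mainnet = DomainType([0, 0, 0, 1]); // made-up domain bytes
    let testnet = DomainType([0, 0, 0, 2]);

    // First call creates the file and stores `mainnet` in the metadata table.
    schema::ensure_up_to_date(path, mainnet).expect("fresh database is created");

    // Reopening with the same domain is fine; a different domain is refused.
    schema::ensure_up_to_date(path, mainnet).expect("same network reopens");
    assert!(schema::ensure_up_to_date(path, testnet).is_err());
}
```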

anchor/database/src/schema.rs

Lines changed: 147 additions & 0 deletions
@@ -0,0 +1,147 @@
+use std::path::Path;
+
+use rusqlite::{Connection, types::Value};
+use ssv_types::domain_type::DomainType;
+
+use crate::{DatabaseError, sql_operations};
+
+type SchemaVersion = u32;
+
+struct Metadata {
+    schema_version: SchemaVersion,
+    domain: DomainType,
+}
+
+enum UpgradeAction {
+    UpToDate,
+    // allow dead code until there are upgrade scripts
+    #[allow(dead_code)]
+    DoUpdate {
+        script: &'static str,
+        new_version: SchemaVersion,
+    },
+    Outdated,
+    Future,
+}
+
+enum DatabaseType {
+    /// If the Option is none, the database is from an older version of Anchor where we did not
+    /// track the schema version yet. We can change the type to "SchemaVersion" at some point and
+    /// treat older versions as "Unknown".
+    Anchor(Option<SchemaVersion>),
+    IncorrectDomain(DomainType),
+    Unknown,
+}
+
+/// Ensure that there is an up-to-date database available at `db_path`. Also check or set the
+/// domain type to ensure the database is for the correct network.
+pub fn ensure_up_to_date(
+    db_path: impl AsRef<Path>,
+    domain: DomainType,
+) -> Result<(), DatabaseError> {
+    let db_path = db_path.as_ref();
+    let is_new_file = !db_path.exists();
+    let conn = Connection::open(db_path)?;
+
+    let mut schema_version = if is_new_file {
+        create_initial_schema(&conn, domain)?
+    } else {
+        match determine_database_type(&conn, domain) {
+            DatabaseType::Anchor(schema_version) => schema_version,
+            DatabaseType::Unknown => {
+                // We do not know what this is. Let's be safe and error out.
+                return Err(DatabaseError::AlreadyPresent(
+                    "Unknown database schema".to_string(),
+                ));
+            }
+            DatabaseType::IncorrectDomain(domain) => {
+                return Err(DatabaseError::AlreadyPresent(format!(
+                    "Existing database for different network: {domain:?}"
+                )));
+            }
+        }
+    };
+
+    // Upgrade scripts are step by step, so we need to loop until we are up to date.
+    loop {
+        match get_upgrade_action(schema_version) {
+            UpgradeAction::UpToDate => return Ok(()),
+            UpgradeAction::DoUpdate {
+                script,
+                new_version,
+            } => {
+                conn.execute_batch(script)?;
+                schema_version = Some(new_version);
+            }
+            UpgradeAction::Outdated => {
+                return Err(DatabaseError::AlreadyPresent(
+                    "Database is outdated - please remove \"anchor_db.sqlite\" or use another data dir.".to_string(),
+                ));
+            }
+            UpgradeAction::Future => {
+                return Err(DatabaseError::AlreadyPresent(
+                    "Database schema is newer than supported by this version of Anchor".to_string(),
+                ));
+            }
+        }
+    }
+}
+
+fn determine_database_type(conn: &Connection, domain: DomainType) -> DatabaseType {
+    let result = conn.query_row(sql_operations::GET_METADATA, [], |row| {
+        Ok(Metadata {
+            schema_version: row.get("schema_version")?,
+            domain: row.get("domain_type")?,
+        })
+    });
+
+    match result {
+        Ok(metadata) => {
+            if metadata.domain == domain {
+                DatabaseType::Anchor(Some(metadata.schema_version))
+            } else {
+                DatabaseType::IncorrectDomain(metadata.domain)
+            }
+        }
+        Err(_) => {
+            // Something failed - this might be a non-Anchor or legacy Anchor database.
+            // To check, try to get the block from the old table before `metadata` was introduced.
+            let legacy = conn
+                .query_row(sql_operations::GET_LEGACY_BLOCK, [], |row| {
+                    // Check if there is the expected column and no further columns.
+                    Ok(
+                        row.get::<_, u64>("block_number").is_ok()
+                            && row.get::<_, Value>(1).is_err(),
+                    )
+                })
+                .unwrap_or(false);
+
+            if legacy {
+                DatabaseType::Anchor(None)
+            } else {
+                DatabaseType::Unknown
+            }
+        }
+    }
+}
+
+// Before release, update the return value of this function if the initial table schema was changed.
+fn create_initial_schema(
+    conn: &rusqlite::Connection,
+    domain: DomainType,
+) -> Result<Option<SchemaVersion>, DatabaseError> {
+    conn.execute_batch(include_str!("table_schema.sql"))?;
+    conn.execute(sql_operations::INSERT_METADATA, [&domain])?;
+    Ok(Some(0))
+}
+
+// Register upgrade scripts in this function and mark the current version. Define any versions for
+// which the schema is not upgradable as "Recreate" and all versions after the current version as
+// "Future".
+fn get_upgrade_action(version: Option<SchemaVersion>) -> UpgradeAction {
+    match version {
+        None => UpgradeAction::Outdated,
+        Some(0) => UpgradeAction::UpToDate,
+        Some(1..) => UpgradeAction::Future,
+    }
+}
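As the in-code comments note, future migrations are registered in `get_upgrade_action` and applied one step at a time by the loop in `ensure_up_to_date`. A hypothetical sketch of what registering a first upgrade script could look like (not part of this commit; `migrations/v1_example.sql` and schema version 1 are invented for illustration):

```rust
fn get_upgrade_action(version: Option<SchemaVersion>) -> UpgradeAction {
    match version {
        // Legacy databases without a metadata table still cannot be upgraded in place.
        None => UpgradeAction::Outdated,
        // A version 0 database would run one script to reach version 1.
        Some(0) => UpgradeAction::DoUpdate {
            script: include_str!("migrations/v1_example.sql"), // hypothetical file
            new_version: 1,
        },
        Some(1) => UpgradeAction::UpToDate,
        Some(2..) => UpgradeAction::Future,
    }
}
```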

anchor/database/src/sql_operations.rs

Lines changed: 7 additions & 2 deletions
@@ -1,3 +1,8 @@
+// Metadata
+pub const INSERT_METADATA: &str = r#"INSERT INTO metadata (domain_type) VALUES (?1)"#;
+pub const GET_METADATA: &str = r#"SELECT schema_version, domain_type FROM metadata"#;
+pub const GET_LEGACY_BLOCK: &str = r#"SELECT * FROM block"#;
+
 // Operator
 pub const INSERT_OPERATOR: &str = r#"
     INSERT INTO operators
@@ -81,8 +86,8 @@ pub const SET_INDEX: &str = r#"
 "#;
 
 // Blocks
-pub const UPDATE_BLOCK_NUMBER: &str = r#"UPDATE block SET block_number = ?1"#;
-pub const GET_BLOCK_NUMBER: &str = r#"SELECT block_number FROM block"#;
+pub const UPDATE_BLOCK_NUMBER: &str = r#"UPDATE metadata SET block_number = ?1"#;
+pub const GET_BLOCK_NUMBER: &str = r#"SELECT block_number FROM metadata"#;
 
 // Nonce
 pub const GET_ALL_NONCES: &str = r#"SELECT owner, nonce FROM owners"#;

anchor/database/src/table_schema.sql

Lines changed: 12 additions & 2 deletions
@@ -1,7 +1,17 @@
-CREATE TABLE block (
+-- SCHEMA VERSION 0
+
+-- we should avoid removing columns from this to keep compatibility between anchor Versions
+CREATE TABLE metadata (
+    schema_version INTEGER NOT NULL DEFAULT 0,
+    domain_type INTEGER NOT NULL,
     block_number INTEGER NOT NULL DEFAULT 0 CHECK (block_number >= 0)
 );
-INSERT INTO block (block_number) VALUES (0);
+CREATE TRIGGER unique_metadata
+BEFORE INSERT ON metadata
+WHEN (SELECT COUNT(*) FROM metadata) >= 1
+BEGIN
+    SELECT RAISE(FAIL, 'we can only have one metadata row');
+END;
 
 CREATE TABLE owners (
     owner TEXT PRIMARY KEY NOT NULL,
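The `unique_metadata` trigger keeps the table single-row: the first insert succeeds and any further insert raises. A small sketch demonstrating that with an in-memory database (not part of this commit; the DDL is the excerpt from table_schema.sql above):

```rust
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE metadata (
             schema_version INTEGER NOT NULL DEFAULT 0,
             domain_type INTEGER NOT NULL,
             block_number INTEGER NOT NULL DEFAULT 0 CHECK (block_number >= 0)
         );
         CREATE TRIGGER unique_metadata
         BEFORE INSERT ON metadata
         WHEN (SELECT COUNT(*) FROM metadata) >= 1
         BEGIN
             SELECT RAISE(FAIL, 'we can only have one metadata row');
         END;",
    )?;

    // One metadata row is allowed...
    conn.execute("INSERT INTO metadata (domain_type) VALUES (1)", [])?;
    // ...and the trigger rejects a second one.
    assert!(
        conn.execute("INSERT INTO metadata (domain_type) VALUES (2)", [])
            .is_err()
    );
    Ok(())
}
```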
