-
Notifications
You must be signed in to change notification settings - Fork 130
Support tiered data storage #692
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -54,6 +54,7 @@ use crate::event::EventQueue; | |
| use crate::fee_estimator::OnchainFeeEstimator; | ||
| use crate::gossip::GossipSource; | ||
| use crate::io::sqlite_store::SqliteStore; | ||
| use crate::io::tier_store::TierStore; | ||
| use crate::io::utils::{ | ||
| read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, | ||
| read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, | ||
|
|
@@ -151,6 +152,21 @@ impl std::fmt::Debug for LogWriterConfig { | |
| } | ||
| } | ||
|
|
||
| #[derive(Default)] | ||
| struct TierStoreConfig { | ||
| ephemeral: Option<Arc<DynStore>>, | ||
| backup: Option<Arc<DynStore>>, | ||
| } | ||
|
|
||
| impl std::fmt::Debug for TierStoreConfig { | ||
| fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | ||
| f.debug_struct("TierStoreConfig") | ||
| .field("ephemeral", &self.ephemeral.as_ref().map(|_| "Arc<DynStore>")) | ||
| .field("backup", &self.backup.as_ref().map(|_| "Arc<DynStore>")) | ||
| .finish() | ||
| } | ||
| } | ||
|
|
||
| /// An error encountered during building a [`Node`]. | ||
| /// | ||
| /// [`Node`]: crate::Node | ||
|
|
@@ -278,6 +294,7 @@ pub struct NodeBuilder { | |
| liquidity_source_config: Option<LiquiditySourceConfig>, | ||
| log_writer_config: Option<LogWriterConfig>, | ||
| async_payments_role: Option<AsyncPaymentsRole>, | ||
| tier_store_config: Option<TierStoreConfig>, | ||
| runtime_handle: Option<tokio::runtime::Handle>, | ||
| pathfinding_scores_sync_config: Option<PathfindingScoresSyncConfig>, | ||
| recovery_mode: bool, | ||
|
|
@@ -296,6 +313,7 @@ impl NodeBuilder { | |
| let gossip_source_config = None; | ||
| let liquidity_source_config = None; | ||
| let log_writer_config = None; | ||
| let tier_store_config = None; | ||
| let runtime_handle = None; | ||
| let pathfinding_scores_sync_config = None; | ||
| let recovery_mode = false; | ||
|
|
@@ -305,6 +323,7 @@ impl NodeBuilder { | |
| gossip_source_config, | ||
| liquidity_source_config, | ||
| log_writer_config, | ||
| tier_store_config, | ||
| runtime_handle, | ||
| async_payments_role: None, | ||
| pathfinding_scores_sync_config, | ||
|
|
@@ -614,6 +633,36 @@ impl NodeBuilder { | |
| self | ||
| } | ||
|
|
||
| /// Configures the backup store for local disaster recovery. | ||
| /// | ||
| /// When building with tiered storage, this store receives a second durable | ||
| /// copy of data written to the primary store. | ||
| /// | ||
| /// Writes and removals for primary-backed data only succeed once both the | ||
| /// primary and backup stores complete successfully. | ||
| /// | ||
| /// If not set, durable data will be stored only in the primary store. | ||
| #[allow(dead_code)] // Used by subsequent FFI/test integration commits. | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why is this allowing `dead_code`?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I added this to silence warnings as I didn't think warnings were permissible in intermediate commits. Since you've clarified that they are, I can remove them from intermediate commits. However, the annotations on the final commit are still needed (the code isn't used except in #871), so CI will fail without them. |
||
| pub fn set_backup_store(&mut self, backup_store: Arc<DynStore>) -> &mut Self { | ||
| let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); | ||
| tier_store_config.backup = Some(backup_store); | ||
| self | ||
| } | ||
|
|
||
| /// Configures the ephemeral store for non-critical, frequently-accessed data. | ||
| /// | ||
| /// When building with tiered storage, this store is used for ephemeral data like | ||
| /// the network graph and scorer data to reduce latency for reads. Data stored here | ||
| /// can be rebuilt if lost. | ||
| /// | ||
| /// If not set, non-critical data will be stored in the primary store. | ||
| #[allow(dead_code)] // Used by subsequent FFI/test integration commits. | ||
| pub fn set_ephemeral_store(&mut self, ephemeral_store: Arc<DynStore>) -> &mut Self { | ||
| let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); | ||
| tier_store_config.ephemeral = Some(ephemeral_store); | ||
| self | ||
| } | ||
|
|
||
| /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options | ||
| /// previously configured. | ||
| pub fn build(&self, node_entropy: NodeEntropy) -> Result<Node, BuildError> { | ||
|
|
@@ -762,8 +811,23 @@ impl NodeBuilder { | |
| } | ||
|
|
||
| /// Builds a [`Node`] instance according to the options previously configured. | ||
| /// | ||
| /// The provided `kv_store` will be used as the primary storage backend. Optionally, | ||
| /// an ephemeral store for frequently-accessed non-critical data (e.g., network graph, scorer) | ||
| /// and a backup store for local disaster recovery can be configured via | ||
| /// [`set_ephemeral_store`] and [`set_backup_store`]. | ||
| /// | ||
| /// [`set_ephemeral_store`]: Self::set_ephemeral_store | ||
| /// [`set_backup_store`]: Self::set_backup_store | ||
| pub fn build_with_store<S: SyncAndAsyncKVStore + Send + Sync + 'static>( | ||
| &self, node_entropy: NodeEntropy, kv_store: S, | ||
| ) -> Result<Node, BuildError> { | ||
| let primary_store: Arc<DynStore> = Arc::new(DynStoreWrapper(kv_store)); | ||
| self.build_with_dynstore(node_entropy, primary_store) | ||
| } | ||
|
|
||
| fn build_with_dynstore( | ||
| &self, node_entropy: NodeEntropy, primary_store: Arc<DynStore>, | ||
| ) -> Result<Node, BuildError> { | ||
| let logger = setup_logger(&self.log_writer_config, &self.config)?; | ||
|
|
||
|
|
@@ -776,6 +840,13 @@ impl NodeBuilder { | |
| })?) | ||
| }; | ||
|
|
||
| let ts_config = self.tier_store_config.as_ref(); | ||
| let mut tier_store = TierStore::new(primary_store, Arc::clone(&logger)); | ||
| if let Some(config) = ts_config { | ||
| config.ephemeral.as_ref().map(|s| tier_store.set_ephemeral_store(Arc::clone(s))); | ||
| config.backup.as_ref().map(|s| tier_store.set_backup_store(Arc::clone(s))); | ||
| } | ||
|
|
||
| let seed_bytes = node_entropy.to_seed_bytes(); | ||
| let config = Arc::new(self.config.clone()); | ||
|
|
||
|
|
@@ -790,7 +861,7 @@ impl NodeBuilder { | |
| seed_bytes, | ||
| runtime, | ||
| logger, | ||
| Arc::new(DynStoreWrapper(kv_store)), | ||
| Arc::new(DynStoreWrapper(tier_store)), | ||
| ) | ||
| } | ||
| } | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
So here we set the backup store, but how do we envision the restore to work? Should that be part of the `recovery_mode`?

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There are two approaches we can take here. For the first, have both primary and backup concrete stores implement `MigratableKVStore` and have the user call `migrate_kv_store_data` before building. It's simple but not ideal, as it's not part of any existing node or builder APIs, and would require explicit documentation.

Alternatively, we can add a `restore_from_backup(backup)` method on `NodeBuilder` and have the migration/restoration of data from backup (source) to primary (target) happen inside build. This requires adding `list_all_keys` to the `DynStore` trait, and `MigratableKVStore` for both stores, so the migration can work through the type-erased `Arc<DynStore>` layer.

For the second approach, we could also refactor `recovery_mode` from a bool into a struct. This keeps both recovery concerns under one concept while remaining independent: a user restoring from backup may not need a full wallet resync, and vice versa.