diff --git a/node-uniffi/Cargo.toml b/node-uniffi/Cargo.toml index 57b77bbc..c444f0d9 100644 --- a/node-uniffi/Cargo.toml +++ b/node-uniffi/Cargo.toml @@ -23,4 +23,4 @@ uniffi = { version = "0.28.3", features = ["bindgen", "tokio", "cli"] } tokio = { version = "1.38.0", features = ["macros", "sync"] } [target.'cfg(target_os = "ios")'.dependencies] -directories = "5.0.1" \ No newline at end of file +directories = "5.0.1" diff --git a/node-uniffi/build-android.sh b/node-uniffi/build-android.sh index 54c3273d..f406a5d4 100755 --- a/node-uniffi/build-android.sh +++ b/node-uniffi/build-android.sh @@ -18,4 +18,4 @@ cargo ndk -o ./app/src/main/jniLibs \ cargo run --bin uniffi-bindgen generate --library ../target/debug/liblumina_node_uniffi.dylib --language kotlin --out-dir ./app/src/main/java/tech/forgen/lumina_node_uniffi/rust -echo "Android build complete" \ No newline at end of file +echo "Android build complete" diff --git a/node-uniffi/build-ios.sh b/node-uniffi/build-ios.sh index fdc326dd..8d02bcca 100755 --- a/node-uniffi/build-ios.sh +++ b/node-uniffi/build-ios.sh @@ -1,5 +1,7 @@ #!/bin/bash +cargo build + cd .. for TARGET in \ @@ -34,4 +36,4 @@ cp ./bindings/*.swift ./ios/ rm -rf bindings -echo "iOS build complete" \ No newline at end of file +echo "iOS build complete" diff --git a/node-uniffi/src/lib.rs b/node-uniffi/src/lib.rs index 39ce6a3f..e7ff6b1c 100644 --- a/node-uniffi/src/lib.rs +++ b/node-uniffi/src/lib.rs @@ -10,17 +10,13 @@ mod types; use celestia_types::ExtendedHeader; use error::{LuminaError, Result}; use lumina_node::{ - blockstore::RedbBlockstore, - events::{EventSubscriber, TryRecvError}, - network::Network, - node::PeerTrackerInfo, - store::RedbStore, + blockstore::RedbBlockstore, events::EventSubscriber, node::PeerTrackerInfo, store::RedbStore, Node, }; -use std::{str::FromStr, sync::Arc}; +use std::str::FromStr; use tendermint::hash::Hash; -use tokio::sync::Mutex; -use types::{NetworkInfo, NodeEvent, NodeStartConfig, PeerId, SyncingInfo}; +use tokio::sync::{Mutex, RwLock}; +use types::{NetworkInfo, NodeConfig, NodeEvent, PeerId, SyncingInfo}; use uniffi::Object; uniffi::setup_scaffolding!(); @@ -30,129 +26,134 @@ lumina_node::uniffi_reexport_scaffolding!(); /// The main Lumina node that manages the connection to the Celestia network. #[derive(Object)] pub struct LuminaNode { - node: Arc>>>, - network: Network, - events_subscriber: Arc>>, + node: RwLock>>, + events_subscriber: Mutex>, + config: NodeConfig, } #[uniffi::export(async_runtime = "tokio")] impl LuminaNode { /// Sets a new connection to the Lumina node for the specified network. #[uniffi::constructor] - pub fn new(network: Network) -> Result> { - Ok(Arc::new(Self { - node: Arc::new(Mutex::new(None)), - network, - events_subscriber: Arc::new(Mutex::new(None)), - })) + pub fn new(config: NodeConfig) -> Result { + Ok(Self { + node: RwLock::new(None), + events_subscriber: Mutex::new(None), + config, + }) } - /// Start the node without optional configuration. - /// UniFFI needs explicit handling for optional parameters to generate correct bindings for different languages. + /// Starts the node and connects to the network. 
pub async fn start(&self) -> Result { - self.start_with_config(None).await - } - - /// Start the node with specific configuration - pub async fn start_with_config(&self, config: Option) -> Result { - let mut node_guard = self.node.lock().await; - - if node_guard.is_some() { - return Err(LuminaError::AlreadyRunning); + let mut node_lock = self.node.write().await; + if node_lock.is_some() { + return Err(LuminaError::NetworkError { + msg: "Node is already running".to_string(), + }); } - let config = config.unwrap_or_else(|| NodeStartConfig { - network: self.network.clone(), - bootnodes: None, - syncing_window_secs: None, - pruning_delay_secs: None, - batch_size: None, - ed25519_secret_key_bytes: None, - }); - - let builder = config.into_node_builder().await?; - + let builder = self.config.clone().into_node_builder().await?; let (new_node, subscriber) = builder .start_subscribed() .await .map_err(|e| LuminaError::NetworkError { msg: e.to_string() })?; - let mut events_guard = self.events_subscriber.lock().await; + *self.events_subscriber.lock().await = Some(subscriber); + *node_lock = Some(new_node); - *events_guard = Some(subscriber); - *node_guard = Some(new_node); Ok(true) } /// Stops the running node and closes all network connections. pub async fn stop(&self) -> Result<()> { - let mut node_guard = self.node.lock().await; - let node = node_guard.take().ok_or(LuminaError::NodeNotRunning)?; - node.stop().await; - Ok(()) + let mut node = self.node.write().await; + if let Some(node) = node.take() { + node.stop().await; + Ok(()) + } else { + Err(LuminaError::NetworkError { + msg: "Node is already stopped".to_string(), + }) + } } /// Checks if the node is currently running. pub async fn is_running(&self) -> bool { - self.node.lock().await.is_some() + self.node.read().await.is_some() } /// Gets the local peer ID as a string. pub async fn local_peer_id(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; Ok(node.local_peer_id().to_base58()) } /// Gets information about connected peers. pub async fn peer_tracker_info(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; Ok(node.peer_tracker_info()) } /// Waits until the node is connected to at least one peer. pub async fn wait_connected(&self) -> Result<()> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; Ok(node.wait_connected().await?) } /// Waits until the node is connected to at least one trusted peer. pub async fn wait_connected_trusted(&self) -> Result<()> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; Ok(node.wait_connected_trusted().await?) } /// Gets current network information. 
pub async fn network_info(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let info = node.network_info().await?; Ok(info.into()) } /// Gets list of addresses the node is listening to. pub async fn listeners(&self) -> Result> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let listeners = node.listeners().await?; Ok(listeners.into_iter().map(|l| l.to_string()).collect()) } /// Gets list of currently connected peer IDs. pub async fn connected_peers(&self) -> Result> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let peers = node.connected_peers().await?; Ok(peers.into_iter().map(PeerId::from).collect()) } /// Sets whether a peer with give ID is trusted. pub async fn set_peer_trust(&self, peer_id: PeerId, is_trusted: bool) -> Result<()> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let peer_id = peer_id .to_libp2p() .map_err(|e| LuminaError::NetworkError { msg: e.to_string() })?; @@ -163,16 +164,20 @@ impl LuminaNode { /// /// Returns a serialized ExtendedHeader string. pub async fn request_head_header(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let header = node.request_head_header().await?; Ok(header.to_string()) //if extended header is needed, we need a wrapper } /// Request a header for the block with a given hash from the network. pub async fn request_header_by_hash(&self, hash: String) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let hash = Hash::from_str(&hash).map_err(|e| LuminaError::InvalidHash { msg: e.to_string() })?; let header = node.request_header_by_hash(&hash).await?; @@ -181,23 +186,27 @@ impl LuminaNode { /// Requests a header by its height. pub async fn request_header_by_height(&self, height: u64) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let header = node.request_header_by_height(height).await?; Ok(header.to_string()) } /// Request headers in range (from, from + amount] from the network. /// - /// The headers will be verified with the `from` header. + /// The headers will be verified with the from header. 
/// Returns array of serialized ExtendedHeader strings. pub async fn request_verified_headers( &self, from: String, // serialized header like its done for WASM amount: u64, ) -> Result> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let from: ExtendedHeader = serde_json::from_str(&from).map_err(|e| LuminaError::InvalidHeader { msg: format!("Invalid header JSON: {}", e), @@ -208,16 +217,20 @@ impl LuminaNode { /// Gets current syncing information. pub async fn syncer_info(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let info = node.syncer_info().await?; Ok(info.into()) } /// Gets the latest header announced in the network. pub async fn get_network_head_header(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let header = node.get_network_head_header().await?; header.map_or( // todo: better error handling, its undefined in wasm @@ -230,16 +243,20 @@ impl LuminaNode { /// Gets the latest locally synced header. pub async fn get_local_head_header(&self) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let header = node.get_local_head_header().await?; Ok(header.to_string()) } /// Get a synced header for the block with a given hash. pub async fn get_header_by_hash(&self, hash: String) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let hash = Hash::from_str(&hash).map_err(|e| LuminaError::InvalidHash { msg: e.to_string() })?; let header = node.get_header_by_hash(&hash).await?; @@ -248,8 +265,10 @@ impl LuminaNode { /// Get a synced header for the block with a given height. 
pub async fn get_header_by_height(&self, height: u64) -> Result { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let header = node.get_header_by_height(height).await?; Ok(header.to_string()) } @@ -266,8 +285,10 @@ impl LuminaNode { start_height: Option, end_height: Option, ) -> Result> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; let headers = match (start_height, end_height) { (None, None) => node.get_headers(..).await, @@ -283,21 +304,26 @@ impl LuminaNode { /// /// Returns serialized SamplingMetadata string if metadata exists for the height. pub async fn get_sampling_metadata(&self, height: u64) -> Result> { - let node_guard = self.node.lock().await; - let node = node_guard.as_ref().ok_or(LuminaError::NodeNotRunning)?; + let node = self.node.read().await; + let node = node.as_ref().ok_or(LuminaError::NetworkError { + msg: "Node not initialized".to_string(), + })?; + let metadata = node.get_sampling_metadata(height).await?; Ok(metadata.map(|m| serde_json::to_string(&m).unwrap())) } /// Returns the next event from the node's event channel. pub async fn events_channel(&self) -> Result> { - let mut events_guard = self.events_subscriber.lock().await; - let subscriber = events_guard.as_mut().ok_or(LuminaError::NodeNotRunning)?; - - match subscriber.try_recv() { - Ok(event) => Ok(Some(event.event.into())), - Err(TryRecvError::Empty) => Ok(None), - Err(e) => Err(LuminaError::NetworkError { msg: e.to_string() }), + let mut events_subscriber = self.events_subscriber.lock().await; + match events_subscriber.as_mut() { + Some(subscriber) => match subscriber.try_recv() { + Ok(event) => Ok(Some(event.event.into())), + Err(e) => Err(LuminaError::NetworkError { msg: e.to_string() }), + }, + None => Err(LuminaError::NetworkError { + msg: "Node is not running".to_string(), + }), } } } diff --git a/node-uniffi/src/types.rs b/node-uniffi/src/types.rs deleted file mode 100644 index 6a568dc2..00000000 --- a/node-uniffi/src/types.rs +++ /dev/null @@ -1,516 +0,0 @@ -use libp2p::identity::Keypair; -use libp2p::swarm::ConnectionCounters as Libp2pConnectionCounters; -use libp2p::swarm::NetworkInfo as Libp2pNetworkInfo; -use libp2p::PeerId as Libp2pPeerId; -use lumina_node::block_ranges::BlockRange as LuminaBlockRange; -use lumina_node::events::{NodeEvent as LuminaNodeEvent, NodeEventInfo as LuminaNodeEventInfo}; -use lumina_node::node::SyncingInfo as LuminaSyncingInfo; -use lumina_node::{blockstore::RedbBlockstore, network, NodeBuilder}; -use std::sync::Arc; -use std::{ - path::PathBuf, - str::FromStr, - time::{Duration, SystemTime}, -}; -use uniffi::Record; - -use lumina_node::store::RedbStore; - -use crate::{error::Result, LuminaError}; - -#[cfg(target_os = "ios")] -use directories::ProjectDirs; - -#[cfg(target_os = "ios")] -/// Returns the platform-specific base path for storing on iOS. 
-fn get_base_path_impl() -> Result { - if let Some(proj_dirs) = ProjectDirs::from("com", "example", "Lumina") { - Ok(proj_dirs.data_dir().to_path_buf()) - } else { - Err(LuminaError::StorageError { - msg: "Could not determine a platform-specific data directory".to_string(), - }) - } -} - -#[cfg(target_os = "android")] -/// Returns the platform-specific base path for storing on Android. -/// -/// On Android, this function attempts to read the `LUMINA_DATA_DIR` environment variable. -/// If `LUMINA_DATA_DIR` is not set, it falls back to `/data/data/com.example.lumina/files`. -fn get_base_path_impl() -> Result { - match std::env::var("LUMINA_DATA_DIR") { - Ok(dir) => Ok(PathBuf::from(dir)), - Err(_) => { - let fallback = "/data/data/com.example.lumina/files"; - Ok(PathBuf::from(fallback)) - } - } -} - -#[cfg(not(any(target_os = "ios", target_os = "android")))] -/// Returns an error for unsupported platforms. -fn get_base_path_impl() -> Result { - Err(LuminaError::StorageError { - msg: "Unsupported platform".to_string(), - }) -} - -/// Returns the platform-specific base path for storing Lumina data. -/// -/// The function determines the base path based on the target operating system: -/// - **iOS**: `~/Library/Application Support/lumina` -/// - **Android**: Value of the `LUMINA_DATA_DIR` environment variable -/// - **Other platforms**: Returns an error indicating unsupported platform. -fn get_base_path() -> Result { - get_base_path_impl() -} - -/// Configuration options for the Lumina node -#[derive(Debug, Clone, Record)] -pub struct NodeStartConfig { - /// Network to connect to - pub network: network::Network, - /// Custom list of bootstrap peers to connect to. - /// If None, uses the canonical bootnodes for the network. - pub bootnodes: Option>, - /// Custom syncing window in seconds. Default is 30 days. - pub syncing_window_secs: Option, - /// Custom pruning delay after syncing window in seconds. Default is 1 hour. - pub pruning_delay_secs: Option, - /// Maximum number of headers in batch while syncing. Default is 128. - pub batch_size: Option, - /// Optional Set the keypair to be used as Node's identity. If None, generates a new Ed25519 keypair. 
- pub ed25519_secret_key_bytes: Option>, -} - -impl NodeStartConfig { - /// Convert into NodeBuilder for the implementation - pub(crate) async fn into_node_builder(self) -> Result> { - let base_path = get_base_path()?; - let network_id = self.network.id(); - let store_path = base_path.join(format!("store-{}", network_id)); - std::fs::create_dir_all(&base_path).map_err(|e| LuminaError::StorageError { - msg: format!("Failed to create data directory: {}", e), - })?; - let db = Arc::new(redb::Database::create(&store_path).map_err(|e| { - LuminaError::StorageInit { - msg: format!("Failed to create database: {}", e), - } - })?); - - let store = RedbStore::new(db.clone()) - .await - .map_err(|e| LuminaError::StorageInit { - msg: format!("Failed to initialize store: {}", e), - })?; - - let blockstore = RedbBlockstore::new(db); - - let bootnodes = if let Some(bootnodes) = self.bootnodes { - let mut resolved = Vec::with_capacity(bootnodes.len()); - for addr in bootnodes { - resolved.push(addr.parse()?); - } - resolved - } else { - self.network.canonical_bootnodes().collect::>() - }; - - let keypair = if let Some(key_bytes) = self.ed25519_secret_key_bytes { - if key_bytes.len() != 32 { - return Err(LuminaError::NetworkError { - msg: "Ed25519 private key must be 32 bytes".into(), - }); - } - - Keypair::ed25519_from_bytes(key_bytes).map_err(|e| LuminaError::NetworkError { - msg: format!("Invalid Ed25519 key: {}", e), - })? - } else { - libp2p::identity::Keypair::generate_ed25519() - }; - - let mut builder = NodeBuilder::new() - .store(store) - .blockstore(blockstore) - .network(self.network) - .bootnodes(bootnodes) - .keypair(keypair) - .sync_batch_size(self.batch_size.unwrap_or(128)); - - if let Some(secs) = self.syncing_window_secs { - builder = builder.sampling_window(Duration::from_secs(secs.into())); - } - - if let Some(secs) = self.pruning_delay_secs { - builder = builder.pruning_delay(Duration::from_secs(secs.into())); - } - - Ok(builder) - } -} - -#[derive(Record)] -pub struct NetworkInfo { - /// The total number of connected peers. - pub num_peers: u32, - /// Counters of ongoing network connections. - pub connection_counters: ConnectionCounters, -} - -/// Counters of ongoing network connections. -#[derive(Record)] -pub struct ConnectionCounters { - /// The current number of connections. - pub num_connections: u32, - /// The current number of pending connections. - pub num_pending: u32, - /// The current number of incoming connections. - pub num_pending_incoming: u32, - /// The current number of outgoing connections. - pub num_pending_outgoing: u32, - /// The current number of established connections. - pub num_established: u32, - /// The current number of established inbound connections. - pub num_established_incoming: u32, - /// The current number of established outbound connections. 
- pub num_established_outgoing: u32, -} - -impl From for NetworkInfo { - fn from(info: Libp2pNetworkInfo) -> Self { - Self { - num_peers: info.num_peers() as u32, - connection_counters: info.connection_counters().into(), - } - } -} - -impl From<&Libp2pConnectionCounters> for ConnectionCounters { - fn from(counters: &Libp2pConnectionCounters) -> Self { - Self { - num_connections: counters.num_connections(), - num_pending: counters.num_pending(), - num_pending_incoming: counters.num_pending_incoming(), - num_pending_outgoing: counters.num_pending_outgoing(), - num_established: counters.num_established(), - num_established_incoming: counters.num_established_incoming(), - num_established_outgoing: counters.num_established_outgoing(), - } - } -} - -/// A range of blocks. -#[derive(Record)] -pub struct BlockRange { - pub start: u64, - pub end: u64, -} - -impl From for BlockRange { - fn from(range: LuminaBlockRange) -> Self { - Self { - start: *range.start(), - end: *range.end(), - } - } -} - -/// Status of the node syncing. -#[derive(Record)] -pub struct SyncingInfo { - /// Ranges of headers that are already synchronised - pub stored_headers: Vec, - /// Syncing target. The latest height seen in the network that was successfully verified. - pub subjective_head: u64, -} - -impl From for SyncingInfo { - fn from(info: LuminaSyncingInfo) -> Self { - Self { - stored_headers: info - .stored_headers - .into_inner() - .into_iter() - .map(BlockRange::from) - .collect(), - subjective_head: info.subjective_head, - } - } -} - -#[derive(Record, Clone, Debug)] -pub struct PeerId { - /// The peer ID stored as base58 string. - pub peer_id: String, -} - -impl PeerId { - pub fn to_libp2p(&self) -> std::result::Result { - Libp2pPeerId::from_str(&self.peer_id).map_err(|e| format!("Invalid peer ID format: {}", e)) - } - - pub fn from_libp2p(peer_id: &Libp2pPeerId) -> Self { - Self { - peer_id: peer_id.to_string(), - } - } -} - -impl From for PeerId { - fn from(peer_id: Libp2pPeerId) -> Self { - Self { - peer_id: peer_id.to_string(), - } - } -} - -#[derive(Record)] -pub struct ShareCoordinate { - pub row: u16, - pub column: u16, -} - -/// Events emitted by the node. -#[derive(uniffi::Enum)] -pub enum NodeEvent { - /// Node is connecting to bootnodes - ConnectingToBootnodes, - /// Peer just connected - PeerConnected { - /// The ID of the peer. - id: PeerId, - /// Whether peer was in the trusted list or not. - trusted: bool, - }, - PeerDisconnected { - /// The ID of the peer. - id: PeerId, - /// Whether peer was in the trusted list or not. - trusted: bool, - }, - /// Sampling just started. - SamplingStarted { - /// The block height that will be sampled. - height: u64, - /// The square width of the block. - square_width: u16, - /// The coordinates of the shares that will be sampled. - shares: Vec, - }, - /// A share was sampled. - ShareSamplingResult { - /// The block height of the share. - height: u64, - /// The square width of the block. - square_width: u16, - /// The row of the share. - row: u16, - /// The column of the share. - column: u16, - /// The result of the sampling of the share. - accepted: bool, - }, - /// Sampling just finished. - SamplingFinished { - /// The block height that was sampled. - height: u64, - /// The overall result of the sampling. - accepted: bool, - /// How much time sampling took in milliseconds. - took_ms: u64, - }, - /// Data sampling fatal error. - FatalDaserError { - /// A human readable error. - error: String, - }, - /// A new header was added from HeaderSub. 
- AddedHeaderFromHeaderSub { - /// The height of the header. - height: u64, - }, - /// Fetching header of network head just started. - FetchingHeadHeaderStarted, - /// Fetching header of network head just finished. - FetchingHeadHeaderFinished { - /// The height of the network head. - height: u64, - /// How much time fetching took in milliseconds. - took_ms: u64, - }, - /// Fetching headers of a specific block range just started. - FetchingHeadersStarted { - /// Start of the range. - from_height: u64, - /// End of the range (included). - to_height: u64, - }, - /// Fetching headers of a specific block range just finished. - FetchingHeadersFinished { - /// Start of the range. - from_height: u64, - /// End of the range (included). - to_height: u64, - /// How much time fetching took in milliseconds. - took_ms: u64, - }, - /// Fetching headers of a specific block range just failed. - FetchingHeadersFailed { - /// Start of the range. - from_height: u64, - /// End of the range (included). - to_height: u64, - /// A human readable error. - error: String, - /// How much time fetching took in milliseconds. - took_ms: u64, - }, - /// Header syncing fatal error. - FatalSyncerError { - /// A human readable error. - error: String, - }, - /// Pruned headers up to and including specified height. - PrunedHeaders { - /// Last header height that was pruned - to_height: u64, - }, - /// Pruning fatal error. - FatalPrunerError { - /// A human readable error. - error: String, - }, - /// Network was compromised. - /// - /// This happens when a valid bad encoding fraud proof is received. - /// Ideally it would never happen, but protection needs to exist. - /// In case of compromised network, syncing and data sampling will - /// stop immediately. - NetworkCompromised, - /// Node stopped. 
- NodeStopped, -} - -impl From for NodeEvent { - fn from(event: LuminaNodeEvent) -> Self { - match event { - LuminaNodeEvent::ConnectingToBootnodes => NodeEvent::ConnectingToBootnodes, - LuminaNodeEvent::PeerConnected { id, trusted } => NodeEvent::PeerConnected { - id: PeerId::from_libp2p(&id), - trusted, - }, - LuminaNodeEvent::PeerDisconnected { id, trusted } => NodeEvent::PeerDisconnected { - id: PeerId::from_libp2p(&id), - trusted, - }, - LuminaNodeEvent::SamplingStarted { - height, - square_width, - shares, - } => NodeEvent::SamplingStarted { - height, - square_width, - shares: shares - .into_iter() - .map(|(row, col)| ShareCoordinate { row, column: col }) - .collect(), - }, - LuminaNodeEvent::ShareSamplingResult { - height, - square_width, - row, - column, - accepted, - } => NodeEvent::ShareSamplingResult { - height, - square_width, - row, - column, - accepted, - }, - LuminaNodeEvent::SamplingFinished { - height, - accepted, - took, - } => NodeEvent::SamplingFinished { - height, - accepted, - took_ms: took.as_millis() as u64, - }, - LuminaNodeEvent::FatalDaserError { error } => NodeEvent::FatalDaserError { error }, - LuminaNodeEvent::AddedHeaderFromHeaderSub { height } => { - NodeEvent::AddedHeaderFromHeaderSub { height } - } - LuminaNodeEvent::FetchingHeadHeaderStarted => NodeEvent::FetchingHeadHeaderStarted, - LuminaNodeEvent::FetchingHeadHeaderFinished { height, took } => { - NodeEvent::FetchingHeadHeaderFinished { - height, - took_ms: took.as_millis() as u64, - } - } - LuminaNodeEvent::FetchingHeadersStarted { - from_height, - to_height, - } => NodeEvent::FetchingHeadersStarted { - from_height, - to_height, - }, - LuminaNodeEvent::FetchingHeadersFinished { - from_height, - to_height, - took, - } => NodeEvent::FetchingHeadersFinished { - from_height, - to_height, - took_ms: took.as_millis() as u64, - }, - LuminaNodeEvent::FetchingHeadersFailed { - from_height, - to_height, - error, - took, - } => NodeEvent::FetchingHeadersFailed { - from_height, - to_height, - error, - took_ms: took.as_millis() as u64, - }, - LuminaNodeEvent::FatalSyncerError { error } => NodeEvent::FatalSyncerError { error }, - LuminaNodeEvent::PrunedHeaders { to_height } => NodeEvent::PrunedHeaders { to_height }, - LuminaNodeEvent::FatalPrunerError { error } => NodeEvent::FatalPrunerError { error }, - LuminaNodeEvent::NetworkCompromised => NodeEvent::NetworkCompromised, - LuminaNodeEvent::NodeStopped => NodeEvent::NodeStopped, - _ => panic!("Unknown event: {:?}", event), - } - } -} - -/// Information about a node event. -#[derive(Record)] -pub struct NodeEventInfo { - /// The event that occurred. - pub event: NodeEvent, - /// Unix timestamp in milliseconds when the event occurred. - pub timestamp: u64, - /// Source file path where the event was emitted. - pub file_path: String, - /// Line number in source file where event was emitted. 
- pub file_line: u32, -} - -impl From for NodeEventInfo { - fn from(info: LuminaNodeEventInfo) -> Self { - Self { - event: info.event.into(), - timestamp: info - .time - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_millis() as u64, - file_path: info.file_path.to_string(), - file_line: info.file_line, - } - } -} diff --git a/node-uniffi/src/types/config.rs b/node-uniffi/src/types/config.rs new file mode 100644 index 00000000..7c8ddcf1 --- /dev/null +++ b/node-uniffi/src/types/config.rs @@ -0,0 +1,109 @@ +use std::{path::PathBuf, sync::Arc, time::Duration}; + +use libp2p::identity::Keypair; +use lumina_node::{blockstore::RedbBlockstore, network, store::RedbStore, NodeBuilder}; +use tokio::task::spawn_blocking; +use uniffi::Record; + +use crate::error::{LuminaError, Result}; + +/// Configuration options for the Lumina node +#[derive(Debug, Clone, Record)] +pub struct NodeConfig { + /// Base path for storing node data as a string + pub base_path: String, + /// Network to connect to + pub network: network::Network, + /// Custom list of bootstrap peers to connect to. + /// If None, uses the canonical bootnodes for the network. + pub bootnodes: Option>, + /// Custom syncing window in seconds. Default is 30 days. + pub syncing_window_secs: Option, + /// Custom pruning delay after syncing window in seconds. Default is 1 hour. + pub pruning_delay_secs: Option, + /// Maximum number of headers in batch while syncing. Default is 128. + pub batch_size: Option, + /// Optional Set the keypair to be used as Node's identity. If None, generates a new Ed25519 keypair. + pub ed25519_secret_key_bytes: Option>, +} + +impl NodeConfig { + /// Convert into NodeBuilder for the implementation + pub(crate) async fn into_node_builder(self) -> Result> { + let network_id = self.network.id(); + let base_path = PathBuf::from(self.base_path); + let store_path = base_path.join(format!("store-{}", network_id)); + + spawn_blocking(move || { + std::fs::create_dir_all(&base_path).map_err(|e| LuminaError::StorageError { + msg: format!("Failed to create data directory: {}", e), + }) + }) + .await + .map_err(|e| LuminaError::StorageError { + msg: format!("Task join error: {}", e), + })??; + + let db = spawn_blocking(move || { + redb::Database::create(&store_path) + .map(Arc::new) + .map_err(|e| LuminaError::StorageInit { + msg: format!("Failed to create database: {}", e), + }) + }) + .await + .map_err(|e| LuminaError::StorageError { + msg: format!("Task join error: {}", e), + })??; + + let store = RedbStore::new(db.clone()) + .await + .map_err(|e| LuminaError::StorageInit { + msg: format!("Failed to initialize store: {}", e), + })?; + + let blockstore = RedbBlockstore::new(db); + + let bootnodes = if let Some(bootnodes) = self.bootnodes { + let mut resolved = Vec::with_capacity(bootnodes.len()); + for addr in bootnodes { + resolved.push(addr.parse()?); + } + resolved + } else { + self.network.canonical_bootnodes().collect::>() + }; + + let keypair = if let Some(key_bytes) = self.ed25519_secret_key_bytes { + if key_bytes.len() != 32 { + return Err(LuminaError::NetworkError { + msg: "Ed25519 private key must be 32 bytes".into(), + }); + } + + Keypair::ed25519_from_bytes(key_bytes).map_err(|e| LuminaError::NetworkError { + msg: format!("Invalid Ed25519 key: {}", e), + })? 
+ } else { + libp2p::identity::Keypair::generate_ed25519() + }; + + let mut builder = NodeBuilder::new() + .store(store) + .blockstore(blockstore) + .network(self.network) + .bootnodes(bootnodes) + .keypair(keypair) + .sync_batch_size(self.batch_size.unwrap_or(128)); + + if let Some(secs) = self.syncing_window_secs { + builder = builder.sampling_window(Duration::from_secs(secs.into())); + } + + if let Some(secs) = self.pruning_delay_secs { + builder = builder.pruning_delay(Duration::from_secs(secs.into())); + } + + Ok(builder) + } +} diff --git a/node-uniffi/src/types/event.rs b/node-uniffi/src/types/event.rs new file mode 100644 index 00000000..01917ab6 --- /dev/null +++ b/node-uniffi/src/types/event.rs @@ -0,0 +1,251 @@ +use libp2p::PeerId as Libp2pPeerId; +use lumina_node::events::NodeEvent as LuminaNodeEvent; +use std::str::FromStr; +use uniffi::Record; + +#[derive(Record, Clone, Debug)] +pub struct PeerId { + /// The peer ID stored as base58 string. + pub peer_id: String, +} + +impl PeerId { + pub fn to_libp2p(&self) -> std::result::Result { + Libp2pPeerId::from_str(&self.peer_id).map_err(|e| format!("Invalid peer ID format: {}", e)) + } + + pub fn from_libp2p(peer_id: &Libp2pPeerId) -> Self { + Self { + peer_id: peer_id.to_string(), + } + } +} + +impl From for PeerId { + fn from(peer_id: Libp2pPeerId) -> Self { + Self { + peer_id: peer_id.to_string(), + } + } +} + +#[derive(Record)] +pub struct ShareCoordinate { + row: u16, + column: u16, +} + +/// Events emitted by the node. +#[derive(uniffi::Enum)] +pub enum NodeEvent { + /// Node is connecting to bootnodes + ConnectingToBootnodes, + /// Peer just connected + PeerConnected { + /// The ID of the peer. + id: PeerId, + /// Whether peer was in the trusted list or not. + trusted: bool, + }, + PeerDisconnected { + /// The ID of the peer. + id: PeerId, + /// Whether peer was in the trusted list or not. + trusted: bool, + }, + /// Sampling just started. + SamplingStarted { + /// The block height that will be sampled. + height: u64, + /// The square width of the block. + square_width: u16, + /// The coordinates of the shares that will be sampled. + shares: Vec, + }, + /// A share was sampled. + ShareSamplingResult { + /// The block height of the share. + height: u64, + /// The square width of the block. + square_width: u16, + /// The row of the share. + row: u16, + /// The column of the share. + column: u16, + /// The result of the sampling of the share. + accepted: bool, + }, + /// Sampling just finished. + SamplingFinished { + /// The block height that was sampled. + height: u64, + /// The overall result of the sampling. + accepted: bool, + /// How much time sampling took in milliseconds. + took_ms: u64, + }, + /// Data sampling fatal error. + FatalDaserError { + /// A human readable error. + error: String, + }, + /// A new header was added from HeaderSub. + AddedHeaderFromHeaderSub { + /// The height of the header. + height: u64, + }, + /// Fetching header of network head just started. + FetchingHeadHeaderStarted, + /// Fetching header of network head just finished. + FetchingHeadHeaderFinished { + /// The height of the network head. + height: u64, + /// How much time fetching took in milliseconds. + took_ms: u64, + }, + /// Fetching headers of a specific block range just started. + FetchingHeadersStarted { + /// Start of the range. + from_height: u64, + /// End of the range (included). + to_height: u64, + }, + /// Fetching headers of a specific block range just finished. + FetchingHeadersFinished { + /// Start of the range. 
+ from_height: u64, + /// End of the range (included). + to_height: u64, + /// How much time fetching took in milliseconds. + took_ms: u64, + }, + /// Fetching headers of a specific block range just failed. + FetchingHeadersFailed { + /// Start of the range. + from_height: u64, + /// End of the range (included). + to_height: u64, + /// A human readable error. + error: String, + /// How much time fetching took in milliseconds. + took_ms: u64, + }, + /// Header syncing fatal error. + FatalSyncerError { + /// A human readable error. + error: String, + }, + /// Pruned headers up to and including specified height. + PrunedHeaders { + /// Last header height that was pruned + to_height: u64, + }, + /// Pruning fatal error. + FatalPrunerError { + /// A human readable error. + error: String, + }, + /// Network was compromised. + /// + /// This happens when a valid bad encoding fraud proof is received. + /// Ideally it would never happen, but protection needs to exist. + /// In case of compromised network, syncing and data sampling will + /// stop immediately. + NetworkCompromised, + /// Node stopped. + NodeStopped, +} + +impl From for NodeEvent { + fn from(event: LuminaNodeEvent) -> Self { + match event { + LuminaNodeEvent::ConnectingToBootnodes => NodeEvent::ConnectingToBootnodes, + LuminaNodeEvent::PeerConnected { id, trusted } => NodeEvent::PeerConnected { + id: PeerId::from_libp2p(&id), + trusted, + }, + LuminaNodeEvent::PeerDisconnected { id, trusted } => NodeEvent::PeerDisconnected { + id: PeerId::from_libp2p(&id), + trusted, + }, + LuminaNodeEvent::SamplingStarted { + height, + square_width, + shares, + } => NodeEvent::SamplingStarted { + height, + square_width, + shares: shares + .into_iter() + .map(|(row, col)| ShareCoordinate { row, column: col }) + .collect(), + }, + LuminaNodeEvent::ShareSamplingResult { + height, + square_width, + row, + column, + accepted, + } => NodeEvent::ShareSamplingResult { + height, + square_width, + row, + column, + accepted, + }, + LuminaNodeEvent::SamplingFinished { + height, + accepted, + took, + } => NodeEvent::SamplingFinished { + height, + accepted, + took_ms: took.as_millis() as u64, + }, + LuminaNodeEvent::FatalDaserError { error } => NodeEvent::FatalDaserError { error }, + LuminaNodeEvent::AddedHeaderFromHeaderSub { height } => { + NodeEvent::AddedHeaderFromHeaderSub { height } + } + LuminaNodeEvent::FetchingHeadHeaderStarted => NodeEvent::FetchingHeadHeaderStarted, + LuminaNodeEvent::FetchingHeadHeaderFinished { height, took } => { + NodeEvent::FetchingHeadHeaderFinished { + height, + took_ms: took.as_millis() as u64, + } + } + LuminaNodeEvent::FetchingHeadersStarted { + from_height, + to_height, + } => NodeEvent::FetchingHeadersStarted { + from_height, + to_height, + }, + LuminaNodeEvent::FetchingHeadersFinished { + from_height, + to_height, + took, + } => NodeEvent::FetchingHeadersFinished { + from_height, + to_height, + took_ms: took.as_millis() as u64, + }, + LuminaNodeEvent::FetchingHeadersFailed { + from_height, + to_height, + error, + took, + } => NodeEvent::FetchingHeadersFailed { + from_height, + to_height, + error, + took_ms: took.as_millis() as u64, + }, + LuminaNodeEvent::FatalSyncerError { error } => NodeEvent::FatalSyncerError { error }, + LuminaNodeEvent::PrunedHeaders { to_height } => NodeEvent::PrunedHeaders { to_height }, + LuminaNodeEvent::FatalPrunerError { error } => NodeEvent::FatalPrunerError { error }, + LuminaNodeEvent::NetworkCompromised => NodeEvent::NetworkCompromised, + LuminaNodeEvent::NodeStopped => 
NodeEvent::NodeStopped, + _ => panic!("Unknown event: {:?}", event), + } + } +} diff --git a/node-uniffi/src/types/mod.rs b/node-uniffi/src/types/mod.rs new file mode 100644 index 00000000..a66c1beb --- /dev/null +++ b/node-uniffi/src/types/mod.rs @@ -0,0 +1,9 @@ +mod config; +mod event; +mod network; +mod sync; + +pub use config::NodeConfig; +pub use event::{NodeEvent, PeerId}; +pub use network::NetworkInfo; +pub use sync::SyncingInfo; diff --git a/node-uniffi/src/types/network.rs b/node-uniffi/src/types/network.rs new file mode 100644 index 00000000..dc7d0a1c --- /dev/null +++ b/node-uniffi/src/types/network.rs @@ -0,0 +1,53 @@ +use libp2p::swarm::ConnectionCounters as Libp2pConnectionCounters; +use libp2p::swarm::NetworkInfo as Libp2pNetworkInfo; +use uniffi::Record; + +#[derive(Record)] +pub struct NetworkInfo { + /// The total number of connected peers. + pub num_peers: u32, + /// Counters of ongoing network connections. + connection_counters: ConnectionCounters, +} + +/// Counters of ongoing network connections. +#[derive(Record)] +struct ConnectionCounters { + /// The current number of connections. + pub num_connections: u32, + /// The current number of pending connections. + pub num_pending: u32, + /// The current number of incoming connections. + pub num_pending_incoming: u32, + /// The current number of outgoing connections. + pub num_pending_outgoing: u32, + /// The current number of established connections. + pub num_established: u32, + /// The current number of established inbound connections. + pub num_established_incoming: u32, + /// The current number of established outbound connections. + pub num_established_outgoing: u32, +} + +impl From for NetworkInfo { + fn from(info: Libp2pNetworkInfo) -> Self { + Self { + num_peers: info.num_peers() as u32, + connection_counters: info.connection_counters().into(), + } + } +} + +impl From<&Libp2pConnectionCounters> for ConnectionCounters { + fn from(counters: &Libp2pConnectionCounters) -> Self { + Self { + num_connections: counters.num_connections(), + num_pending: counters.num_pending(), + num_pending_incoming: counters.num_pending_incoming(), + num_pending_outgoing: counters.num_pending_outgoing(), + num_established: counters.num_established(), + num_established_incoming: counters.num_established_incoming(), + num_established_outgoing: counters.num_established_outgoing(), + } + } +} diff --git a/node-uniffi/src/types/sync.rs b/node-uniffi/src/types/sync.rs new file mode 100644 index 00000000..764676dc --- /dev/null +++ b/node-uniffi/src/types/sync.rs @@ -0,0 +1,42 @@ +use lumina_node::block_ranges::BlockRange as LuminaBlockRange; +use lumina_node::node::SyncingInfo as LuminaSyncingInfo; +use uniffi::Record; + +/// A range of blocks. +#[derive(Record)] +struct BlockRange { + start: u64, + end: u64, +} + +impl From for BlockRange { + fn from(range: LuminaBlockRange) -> Self { + Self { + start: *range.start(), + end: *range.end(), + } + } +} + +/// Status of the node syncing. +#[derive(Record)] +pub struct SyncingInfo { + /// Ranges of headers that are already synchronised + stored_headers: Vec, + /// Syncing target. The latest height seen in the network that was successfully verified. + subjective_head: u64, +} + +impl From for SyncingInfo { + fn from(info: LuminaSyncingInfo) -> Self { + Self { + stored_headers: info + .stored_headers + .into_inner() + .into_iter() + .map(BlockRange::from) + .collect(), + subjective_head: info.subjective_head, + } + } +}
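
For readers skimming the diff, here is a minimal sketch (not part of the PR) of how the reworked `NodeConfig`-based API might be driven from Rust if the crate were linked directly; in practice these types are consumed through the generated Swift/Kotlin bindings. The import paths, the `Network::Mainnet` variant, the `/tmp/lumina` path, and `LuminaError: std::error::Error` are assumptions for illustration; the `NodeConfig` fields and the `new`/`start`/`stop` flow mirror the code added above.

```rust
// Hypothetical usage sketch of the new API; import paths are assumptions, since
// the `types` module is consumed via UniFFI rather than re-exported for Rust.
use lumina_node::network::Network;
use lumina_node_uniffi::{LuminaNode, NodeConfig};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = NodeConfig {
        // Storage root for the redb store/blockstore; on mobile this would come
        // from the host app (e.g. the app's documents directory).
        base_path: "/tmp/lumina".to_string(),
        network: Network::Mainnet,
        bootnodes: None,                // fall back to the canonical bootnodes
        syncing_window_secs: None,      // default: 30 days
        pruning_delay_secs: None,       // default: 1 hour
        batch_size: None,               // default: 128 headers per batch
        ed25519_secret_key_bytes: None, // generate a fresh Ed25519 identity
    };

    let node = LuminaNode::new(config)?;
    node.start().await?;          // errors if the node is already running
    node.wait_connected().await?; // block until at least one peer is connected

    println!("local peer id: {}", node.local_peer_id().await?);
    println!("connected peers: {:?}", node.connected_peers().await?);

    node.stop().await?;           // errors if the node was never started
    Ok(())
}
```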
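
Two conventions in the diff are worth calling out with a sketch: headers cross the FFI boundary as serialized strings (the comments note this mirrors the WASM bindings), and events are pulled by polling `events_channel`, which after this change reports an empty channel as an error rather than `Ok(None)`. The snippet below assumes the same hypothetical import path as the previous example.

```rust
// Sketch (not part of the PR) of the polling and serialized-header conventions.
use lumina_node_uniffi::LuminaNode;

async fn poll_once(node: &LuminaNode) {
    // With this change, try_recv errors (including an empty channel) map to
    // Err(NetworkError), so callers should treat errors as "no event yet".
    match node.events_channel().await {
        Ok(Some(_event)) => { /* dispatch the NodeEvent to the host application */ }
        Ok(None) => { /* nothing buffered */ }
        Err(_) => { /* empty channel or node not running; retry later */ }
    }
}

async fn fetch_recent_headers(
    node: &LuminaNode,
) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    // request_head_header returns a serialized ExtendedHeader; per the diff's
    // comments, the same string form is what request_verified_headers expects
    // for its `from` argument.
    let head = node.request_head_header().await?;
    let verified = node.request_verified_headers(head, 10).await?;
    Ok(verified)
}
```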