Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
- [BREAKING] Added `block_prover_url` to block producer configuration (#719).
- [BREAKING] Updated to Rust Edition 2024 (#727).
- [BREAKING] MSRV bumped to 1.85 (#727).
- [BREAKING] Replaced `toml` configuration with CLI (#732).

### Enhancements

Expand Down
5 changes: 1 addition & 4 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion bin/node/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ tracing-forest = ["miden-node-block-producer/tracing-forest"]

[dependencies]
anyhow = { version = "1.0" }
clap = { version = "4.5", features = ["derive", "string"] }
clap = { version = "4.5", features = ["derive", "env", "string"] }
miden-lib = { workspace = true }
miden-node-block-producer = { workspace = true }
miden-node-rpc = { workspace = true }
Expand Down
75 changes: 75 additions & 0 deletions bin/node/src/commands/block_producer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
use anyhow::Context;
use miden_node_block_producer::server::BlockProducer;
use miden_node_utils::grpc::UrlExt;
use url::Url;

use super::{
ENV_BATCH_PROVER_URL, ENV_BLOCK_PRODUCER_URL, ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL,
ENV_STORE_URL,
};

#[derive(clap::Subcommand)]
pub enum BlockProducerCommand {
    /// Starts the block-producer component.
    Start {
        /// Url at which to serve the gRPC API.
        //
        // `value_name` added for consistency with the bundled command's help output.
        #[arg(env = ENV_BLOCK_PRODUCER_URL, value_name = "URL")]
        url: Url,

        /// The store's gRPC url.
        #[arg(long = "store.url", env = ENV_STORE_URL, value_name = "URL")]
        store_url: Url,

        /// The remote batch prover's gRPC url. If unset, will default to running a prover
        /// in-process which is expensive.
        #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")]
        batch_prover_url: Option<Url>,

        /// The remote block prover's gRPC url. If unset, will default to running a prover
        /// in-process which is expensive.
        #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")]
        block_prover_url: Option<Url>,

        /// Enables the exporting of traces for OpenTelemetry.
        ///
        /// This can be further configured using environment variables as defined in the official
        /// OpenTelemetry documentation. See our operator manual for further details.
        #[arg(long = "enable-otel", default_value_t = false, env = ENV_ENABLE_OTEL, value_name = "bool")]
        open_telemetry: bool,
    },
}
Comment on lines +13 to +40
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Considering your comment, is there anything to document (here and elsewhere) about the https support?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think I can solve the issue by making component start up more robust. Right now I wouldn't even know what exactly would work beyond (sometimes) discarding the protocol.

Once startup is robust, it should be able to handle https as well, even if we ourselves never use it for performance/simplicity reasons.


impl BlockProducerCommand {
    /// Runs the block-producer component until it exits or fails.
    ///
    /// Resolves the configured URLs to socket addresses, binds the gRPC listener, and then
    /// initializes and serves the [`BlockProducer`].
    ///
    /// # Errors
    ///
    /// Returns an error if either URL cannot be resolved to a socket address, if binding the
    /// listener fails, or if the block-producer fails during initialization or serving.
    pub async fn handle(self) -> anyhow::Result<()> {
        let Self::Start {
            url,
            store_url,
            batch_prover_url,
            block_prover_url,
            // Note: open-telemetry is handled in main.
            open_telemetry: _,
        } = self;

        let store_url = store_url
            .to_socket()
            .context("Failed to extract socket address from store URL")?;

        // Fixed: these error contexts previously blamed the store, but this is the
        // block-producer's own endpoint.
        let listener = url
            .to_socket()
            .context("Failed to extract socket address from block-producer URL")?;
        let listener = tokio::net::TcpListener::bind(listener)
            .await
            .context("Failed to bind to block-producer's gRPC URL")?;

        // Fixed: contexts previously said "store"; this is the block-producer.
        BlockProducer::init(listener, store_url, batch_prover_url, block_prover_url)
            .await
            .context("Loading block-producer")?
            .serve()
            .await
            .context("Serving block-producer")
    }

    /// Returns `true` if OpenTelemetry trace exporting was requested on the command line.
    pub fn is_open_telemetry_enabled(&self) -> bool {
        let Self::Start { open_telemetry, .. } = self;
        *open_telemetry
    }
}
189 changes: 189 additions & 0 deletions bin/node/src/commands/bundled.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,189 @@
use std::{collections::HashMap, path::PathBuf};

use anyhow::Context;
use miden_node_block_producer::server::BlockProducer;
use miden_node_rpc::server::Rpc;
use miden_node_store::server::Store;
use miden_node_utils::grpc::UrlExt;
use tokio::{net::TcpListener, task::JoinSet};
use url::Url;

use super::{
ENV_BATCH_PROVER_URL, ENV_BLOCK_PROVER_URL, ENV_DATA_DIRECTORY, ENV_ENABLE_OTEL, ENV_RPC_URL,
};

#[derive(clap::Subcommand)]
#[expect(clippy::large_enum_variant, reason = "This is a single use enum")]
pub enum BundledCommand {
    /// Bootstraps the blockchain database with the genesis block.
    ///
    /// This populates the genesis block's data with the accounts and data listed in the
    /// configuration file.
    ///
    /// Each generated genesis account's data is also written to disk. This includes the private
    /// key which can be used to create transactions for these accounts.
    ///
    /// See also: `store dump-genesis`
    Bootstrap {
        /// Genesis configuration file.
        ///
        /// If not provided the default configuration is used.
        #[arg(long, value_name = "FILE")]
        config: Option<PathBuf>,
        /// Directory in which to store the database and raw block data.
        #[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")]
        data_directory: PathBuf,
        /// Directory to write the account data to.
        //
        // Fixed: this was a plain `//` comment, so clap emitted no help text for this arg.
        #[arg(long, value_name = "DIR")]
        accounts_directory: PathBuf,
    },

    /// Runs all three node components in the same process.
    ///
    /// The internal gRPC endpoints for the store and block-producer will each be assigned a random
    /// open port on localhost (127.0.0.1:0).
    Start {
        /// Url at which to serve the RPC component's gRPC API.
        #[arg(long = "rpc.url", env = ENV_RPC_URL, value_name = "URL")]
        rpc_url: Url,

        /// Directory in which the Store component should store the database and raw block data.
        #[arg(long = "data-directory", env = ENV_DATA_DIRECTORY, value_name = "DIR")]
        data_directory: PathBuf,

        /// The remote batch prover's gRPC url. If unset, will default to running a prover
        /// in-process which is expensive.
        #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")]
        batch_prover_url: Option<Url>,

        /// The remote block prover's gRPC url. If unset, will default to running a prover
        /// in-process which is expensive.
        #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")]
        block_prover_url: Option<Url>,

        /// Enables the exporting of traces for OpenTelemetry.
        ///
        /// This can be further configured using environment variables as defined in the official
        /// OpenTelemetry documentation. See our operator manual for further details.
        #[arg(long = "enable-otel", default_value_t = false, env = ENV_ENABLE_OTEL, value_name = "bool")]
        open_telemetry: bool,
    },
}

impl BundledCommand {
    /// Dispatches the selected subcommand.
    ///
    /// `Bootstrap` delegates to the store's bootstrap logic; `Start` launches all three
    /// components in-process.
    pub async fn handle(self) -> anyhow::Result<()> {
        match self {
            BundledCommand::Bootstrap {
                config,
                data_directory,
                accounts_directory,
            } => {
                // Currently the bundled bootstrap is identical to the store's bootstrap.
                crate::commands::store::StoreCommand::Bootstrap {
                    config,
                    data_directory,
                    accounts_directory,
                }
                .handle()
                .await
                // Fixed capitalization for consistency with the file's other contexts.
                .context("Failed to bootstrap the store component")
            },
            BundledCommand::Start {
                rpc_url,
                data_directory,
                batch_prover_url,
                block_prover_url,
                // Note: open-telemetry is handled in main.
                open_telemetry: _,
            } => Self::start(rpc_url, data_directory, batch_prover_url, block_prover_url).await,
        }
    }

    /// Starts the store, block-producer and RPC components in one process and waits until the
    /// first of them exits or fails.
    ///
    /// The RPC endpoint is bound to `rpc_url`; the store and block-producer each receive a
    /// random localhost port. Any component completing — successfully or not — is treated as a
    /// fatal error since all components are expected to run indefinitely.
    async fn start(
        rpc_url: Url,
        data_directory: PathBuf,
        batch_prover_url: Option<Url>,
        block_prover_url: Option<Url>,
    ) -> anyhow::Result<()> {
        // Start listening on all gRPC urls so that inter-component connections can be created
        // before each component is fully started up.
        //
        // This is required because `tonic` does not handle retries nor reconnections and our
        // services expect to be able to connect on startup.
        //
        // Fixed garbled context string ("Failed to to RPC gRPC socket").
        let grpc_rpc = rpc_url
            .to_socket()
            .context("Failed to extract socket address from RPC URL")?;
        let grpc_rpc = TcpListener::bind(grpc_rpc)
            .await
            .context("Failed to bind to RPC gRPC endpoint")?;
        let grpc_store = TcpListener::bind("127.0.0.1:0")
            .await
            .context("Failed to bind to store gRPC endpoint")?;
        let grpc_block_producer = TcpListener::bind("127.0.0.1:0")
            .await
            .context("Failed to bind to block-producer gRPC endpoint")?;

        let store_address =
            grpc_store.local_addr().context("Failed to retrieve the store's gRPC address")?;
        let block_producer_address = grpc_block_producer
            .local_addr()
            .context("Failed to retrieve the block-producer's gRPC address")?;

        let mut join_set = JoinSet::new();

        // Start store. The store endpoint is available after loading completes.
        let store = Store::init(grpc_store, data_directory).await.context("Loading store")?;
        let store_id =
            join_set.spawn(async move { store.serve().await.context("Serving store") }).id();

        // Start block-producer. The block-producer's endpoint is available after loading completes.
        let block_producer = BlockProducer::init(
            grpc_block_producer,
            store_address,
            batch_prover_url,
            block_prover_url,
        )
        .await
        .context("Loading block-producer")?;
        let block_producer_id = join_set
            .spawn(async move { block_producer.serve().await.context("Serving block-producer") })
            .id();

        // Start RPC component.
        let rpc = Rpc::init(grpc_rpc, store_address, block_producer_address)
            .await
            .context("Loading RPC")?;
        let rpc_id = join_set.spawn(async move { rpc.serve().await.context("Serving RPC") }).id();

        // Lookup table so we can identify the failed component.
        let component_ids = HashMap::from([
            (store_id, "store"),
            (block_producer_id, "block-producer"),
            (rpc_id, "rpc"),
        ]);

        // SAFETY: The joinset is definitely not empty.
        let component_result = join_set.join_next_with_id().await.unwrap();

        // We expect components to run indefinitely, so we treat any return as fatal.
        //
        // Map all outcomes to an error, and provide component context.
        let (id, err) = match component_result {
            Ok((id, Ok(_))) => (id, Err(anyhow::anyhow!("Component completed unexpectedly"))),
            Ok((id, Err(err))) => (id, Err(err)),
            Err(join_err) => (join_err.id(), Err(join_err).context("Joining component task")),
        };
        let component = component_ids.get(&id).unwrap_or(&"unknown");

        // We could abort and gracefully shutdown the other components, but since we're crashing the
        // node there is no point.

        err.context(format!("Component {component} failed"))
    }

    /// Returns `true` if OpenTelemetry trace exporting was requested on the command line.
    ///
    /// Always `false` for the `Bootstrap` subcommand.
    pub fn is_open_telemetry_enabled(&self) -> bool {
        if let Self::Start { open_telemetry, .. } = self {
            *open_telemetry
        } else {
            false
        }
    }
}
55 changes: 0 additions & 55 deletions bin/node/src/commands/genesis/inputs.rs

This file was deleted.

Loading