Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement PeerDAS Fulu fork activation #6795

Merged
Show file tree
Hide file tree
Changes from 18 commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
2e11554
Implement PeerDAS Fulu fork activation.
jimmygchen Jan 13, 2025
cd77b2c
Update spec tests.
jimmygchen Jan 13, 2025
b029342
Fix compilation and update Kurtosis test config for PeerDAS.
jimmygchen Jan 13, 2025
64e44e1
Fix failing tests now `fulu` fork is included.
jimmygchen Jan 14, 2025
0c9d64b
Merge remote-tracking branch 'origin/unstable' into jimmy/lh-2271-act…
jimmygchen Jan 15, 2025
4e25302
Address review comments and fix lint.
jimmygchen Jan 15, 2025
8cdf82e
Use engine v4 methods for Fulu (v5 methods do not exist yet). Update …
jimmygchen Jan 15, 2025
4d407fe
Merge remote-tracking branch 'origin/unstable' into jimmy/lh-2271-act…
jimmygchen Jan 15, 2025
8980832
Update Fulu spec tests. Revert back to testing Fulu as "feature", bec…
jimmygchen Jan 17, 2025
6d5b5ed
Merge remote-tracking branch 'origin/unstable' into jimmy/lh-2271-act…
jimmygchen Jan 17, 2025
b7da075
More test fixes for Fulu.
jimmygchen Jan 20, 2025
b63a6c4
Merge remote-tracking branch 'origin/unstable' into jimmy/lh-2271-act…
jimmygchen Jan 20, 2025
eff9a5b
More test fixes for Fulu.
jimmygchen Jan 20, 2025
614f984
Fix range sync to select custody peers from its syncing chain instead…
jimmygchen Jan 21, 2025
0e8f671
Merge branch 'unstable' into jimmy/lh-2271-activate-peerdas-at-fulu-f…
jimmygchen Jan 21, 2025
e813532
Skip blob pruning tests for Fulu.
jimmygchen Jan 21, 2025
b3da74b
Merge remote-tracking branch 'origin/unstable' into jimmy/lh-2271-act…
jimmygchen Jan 23, 2025
492c1c6
Use pre-computed data columns for testing and fix tests.
jimmygchen Jan 24, 2025
e21b31e
More beacon chain test fixes.
jimmygchen Jan 24, 2025
d8cba4b
Revert change: select peers from chain for custody by range requests
dapplion Jan 28, 2025
38c7f05
Improve request identification in range sync test
dapplion Jan 28, 2025
8cb49c7
Merge branch 'unstable' into jimmy/lh-2271-activate-peerdas-at-fulu-f…
jimmygchen Jan 30, 2025
ce8090d
Address review comments.
jimmygchen Jan 30, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions beacon_node/beacon_chain/src/beacon_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1249,6 +1249,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.store.get_blobs(block_root).map_err(Error::from)
}

/// Fetches the stored data column sidecars for the block with the given root,
/// if any are present in the database.
///
/// ## Errors
/// May return a database error.
pub fn get_data_columns(
    &self,
    block_root: &Hash256,
) -> Result<Option<DataColumnSidecarList<T::EthSpec>>, Error> {
    // `?` converts the store error via the same `From` impl `map_err` would use.
    Ok(self.store.get_data_columns(block_root)?)
}

/// Returns the data columns at the given root, if any.
///
/// ## Errors
Expand Down Expand Up @@ -5850,6 +5861,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

let kzg = self.kzg.as_ref();

// TODO(fulu): we no longer need blob proofs from PeerDAS and could avoid computing.
kzg_utils::validate_blobs::<T::EthSpec>(
kzg,
expected_kzg_commitments,
Expand Down
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/data_column_verification.rs
Original file line number Diff line number Diff line change
Expand Up @@ -699,7 +699,7 @@ mod test {

#[tokio::test]
async fn empty_data_column_sidecars_fails_validation() {
let spec = ForkName::latest().make_genesis_spec(E::default_spec());
let spec = ForkName::Fulu.make_genesis_spec(E::default_spec());
let harness = BeaconChainHarness::builder(E::default())
.spec(spec.into())
.deterministic_keypairs(64)
Expand Down
11 changes: 6 additions & 5 deletions beacon_node/beacon_chain/src/fulu_readiness.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
//! Provides tools for checking if a node is ready for the Fulu upgrade.

use crate::{BeaconChain, BeaconChainTypes};
use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V5};
use execution_layer::http::{ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V4};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::time::Duration;
Expand Down Expand Up @@ -87,14 +87,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(capabilities) => {
let mut missing_methods = String::from("Required Methods Unsupported:");
let mut all_good = true;
if !capabilities.get_payload_v5 {
// TODO(fulu) switch to v5 when the EL is ready
if !capabilities.get_payload_v4 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_GET_PAYLOAD_V5);
missing_methods.push_str(ENGINE_GET_PAYLOAD_V4);
all_good = false;
}
if !capabilities.new_payload_v5 {
if !capabilities.new_payload_v4 {
missing_methods.push(' ');
missing_methods.push_str(ENGINE_NEW_PAYLOAD_V5);
missing_methods.push_str(ENGINE_NEW_PAYLOAD_V4);
all_good = false;
}

Expand Down
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/kzg_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ pub fn blobs_to_data_column_sidecars<E: EthSpec>(
.map_err(DataColumnSidecarError::BuildSidecarFailed)
}

fn build_data_column_sidecars<E: EthSpec>(
pub(crate) fn build_data_column_sidecars<E: EthSpec>(
kzg_commitments: KzgCommitments<E>,
kzg_commitments_inclusion_proof: FixedVector<Hash256, E::KzgCommitmentsInclusionProofDepth>,
signed_block_header: SignedBeaconBlockHeader,
Expand Down
213 changes: 190 additions & 23 deletions beacon_node/beacon_chain/src/test_utils.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
use crate::blob_verification::GossipVerifiedBlob;
use crate::block_verification_types::{AsBlock, RpcBlock};
use crate::kzg_utils::blobs_to_data_column_sidecars;
use crate::data_column_verification::CustodyDataColumn;
use crate::kzg_utils::build_data_column_sidecars;
use crate::observed_operations::ObservationOutcome;
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::BeaconBlockResponseWrapper;
pub use crate::{
beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
migrate::MigratorConfig,
Expand All @@ -16,6 +17,7 @@ use crate::{
BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler,
StateSkipConfig,
};
use crate::{get_block_root, BeaconBlockResponseWrapper};
use bls::get_withdrawal_credentials;
use eth2::types::SignedBlockContentsTuple;
use execution_layer::test_utils::generate_genesis_header;
Expand Down Expand Up @@ -74,6 +76,11 @@ pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
// Environment variable to read if `ci_logger` feature is enabled.
pub const CI_LOGGER_DIR_ENV_VAR: &str = "CI_LOGGER_DIR";

// Pre-computed data column sidecar using a single static blob from:
// `beacon_node/execution_layer/src/test_utils/fixtures/mainnet/test_blobs_bundle.ssz`
const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] =
include_bytes!("test_utils/fixtures/test_data_column_sidecars.ssz");

// Default target aggregators to set during testing, this ensures an aggregator at each slot.
//
// You should mutate the `ChainSpec` prior to initialising the harness if you would like to use
Expand Down Expand Up @@ -105,7 +112,7 @@ static KZG_NO_PRECOMP: LazyLock<Arc<Kzg>> = LazyLock::new(|| {
});

pub fn get_kzg(spec: &ChainSpec) -> Arc<Kzg> {
if spec.eip7594_fork_epoch.is_some() {
if spec.fulu_fork_epoch.is_some() {
KZG_PEERDAS.clone()
} else if spec.deneb_fork_epoch.is_some() {
KZG.clone()
Expand Down Expand Up @@ -762,15 +769,13 @@ where
pub fn get_head_block(&self) -> RpcBlock<E> {
let block = self.chain.head_beacon_block();
let block_root = block.canonical_root();
let blobs = self.chain.get_blobs(&block_root).unwrap().blobs();
RpcBlock::new(Some(block_root), block, blobs).unwrap()
self.build_rpc_block_from_store_blobs(Some(block_root), block)
}

pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock<E> {
let block = self.chain.get_blinded_block(block_root).unwrap().unwrap();
let full_block = self.chain.store.make_full_block(block_root, block).unwrap();
let blobs = self.chain.get_blobs(block_root).unwrap().blobs();
RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap()
self.build_rpc_block_from_store_blobs(Some(*block_root), Arc::new(full_block))
}

pub fn get_all_validators(&self) -> Vec<usize> {
Expand Down Expand Up @@ -2271,22 +2276,19 @@ where
self.set_current_slot(slot);
let (block, blob_items) = block_contents;

let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec))
.transpose()
.unwrap();
let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?;
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
RpcBlock::new(Some(block_root), block, sidecars).unwrap(),
rpc_block,
NotifyExecutionLayer::Yes,
BlockImportSource::RangeSync,
|| Ok(()),
)
.await?
.try_into()
.unwrap();
.expect("block blobs are available");
self.chain.recompute_head_at_current_slot().await;
Ok(block_hash)
}
Expand All @@ -2297,16 +2299,13 @@ where
) -> Result<SignedBeaconBlockHash, BlockError> {
let (block, blob_items) = block_contents;

let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec))
.transpose()
.unwrap();
let block_root = block.canonical_root();
let rpc_block = self.build_rpc_block_from_blobs(block_root, block, blob_items)?;
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
RpcBlock::new(Some(block_root), block, sidecars).unwrap(),
rpc_block,
NotifyExecutionLayer::Yes,
BlockImportSource::RangeSync,
|| Ok(()),
Expand All @@ -2318,6 +2317,75 @@ where
Ok(block_hash)
}

/// Builds an `Rpc` block from a `SignedBeaconBlock` and blobs or data columns retrieved from
/// the database.
pub fn build_rpc_block_from_store_blobs(
&self,
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<E>>,
) -> RpcBlock<E> {
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
let has_blobs = block
.message()
.body()
.blob_kzg_commitments()
.is_ok_and(|c| !c.is_empty());
if !has_blobs {
return RpcBlock::new_without_blobs(Some(block_root), block);
}

// Blobs are stored as data columns from Fulu (PeerDAS)
if self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
let columns = self.chain.get_data_columns(&block_root).unwrap().unwrap();
let custody_columns = columns
.into_iter()
.map(CustodyDataColumn::from_asserted_custody)
.collect::<Vec<_>>();
RpcBlock::new_with_custody_columns(Some(block_root), block, custody_columns, &self.spec)
.unwrap()
} else {
let blobs = self.chain.get_blobs(&block_root).unwrap().blobs();
RpcBlock::new(Some(block_root), block, blobs).unwrap()
}
}

/// Builds an `RpcBlock` from a `SignedBeaconBlock` and `BlobsList`.
/// Builds an `RpcBlock` from a `SignedBeaconBlock` and an optional `BlobsList`
/// (with its matching KZG proofs).
fn build_rpc_block_from_blobs(
    &self,
    block_root: Hash256,
    block: Arc<SignedBeaconBlock<E, FullPayload<E>>>,
    blob_items: Option<(KzgProofs<E>, BlobsList<E>)>,
) -> Result<RpcBlock<E>, BlockError> {
    if !self.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
        // Pre-Fulu: wrap the blobs (if any) into blob sidecars.
        let sidecars = blob_items
            .map(|(proofs, blobs)| {
                BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)
            })
            .transpose()
            .unwrap();
        return Ok(RpcBlock::new(Some(block_root), block, sidecars)?);
    }

    if !blob_items.is_some_and(|(_, blobs)| !blobs.is_empty()) {
        return Ok(RpcBlock::new_without_blobs(Some(block_root), block));
    }

    // Note: this method ignores the actual custody columns and simply takes the
    // first `sampling_column_count` columns, for testing purposes only, because
    // the chain does not currently have any knowledge of the columns being
    // custodied.
    let sampling_column_count = self
        .chain
        .data_availability_checker
        .get_sampling_column_count();
    let columns: Vec<_> = generate_data_column_sidecars_from_block(&block, &self.spec)
        .into_iter()
        .take(sampling_column_count)
        .map(CustodyDataColumn::from_asserted_custody)
        .collect();
    Ok(RpcBlock::new_with_custody_columns(
        Some(block_root),
        block,
        columns,
        &self.spec,
    )?)
}

pub fn process_attestations(&self, attestations: HarnessAttestations<E>) {
let num_validators = self.validator_keypairs.len();
let mut unaggregated = Vec::with_capacity(num_validators);
Expand Down Expand Up @@ -2991,6 +3059,56 @@ where

Ok(())
}

/// Simulate some of the blobs / data columns being seen on gossip.
/// Converts the blobs to data columns if the slot is Fulu or later.
/// Simulate some of the blobs / data columns being seen on gossip.
/// Converts the blobs to data columns if the slot is Fulu or later.
pub async fn process_gossip_blobs_or_columns<'a>(
    &self,
    block: &SignedBeaconBlock<E>,
    blobs: impl Iterator<Item = &'a Blob<E>>,
    proofs: impl Iterator<Item = &'a KzgProof>,
    custody_columns_opt: Option<HashSet<ColumnIndex>>,
) {
    if !self.chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) {
        // Pre-PeerDAS: import each blob individually through the gossip path.
        for (index, (kzg_proof, blob)) in proofs.into_iter().zip(blobs).enumerate() {
            let sidecar =
                Arc::new(BlobSidecar::new(index, blob.clone(), block, *kzg_proof).unwrap());
            let gossip_blob = GossipVerifiedBlob::new(sidecar, index as u64, &self.chain)
                .expect("should obtain gossip verified blob");
            self.chain
                .process_gossip_blob(gossip_blob)
                .await
                .expect("should import valid gossip verified blob");
        }
        return;
    }

    // PeerDAS: default to the first `sampling_column_count` column indices when
    // the caller does not specify a custody set.
    let custody_columns = custody_columns_opt.unwrap_or_else(|| {
        let sampling_column_count = self
            .chain
            .data_availability_checker
            .get_sampling_column_count() as u64;
        (0..sampling_column_count).collect()
    });

    let verified_columns = generate_data_column_sidecars_from_block(block, &self.spec)
        .into_iter()
        .filter(|sidecar| custody_columns.contains(&sidecar.index))
        .map(|sidecar| {
            let column_index = sidecar.index;
            self.chain
                .verify_data_column_sidecar_for_gossip(sidecar, column_index)
        })
        .collect::<Result<Vec<_>, _>>()
        .unwrap();

    if !verified_columns.is_empty() {
        self.chain
            .process_gossip_data_columns(verified_columns, || Ok(()))
            .await
            .unwrap();
    }
}
}

// Junk `Debug` impl to satisfy certain trait bounds during testing.
Expand Down Expand Up @@ -3176,10 +3294,59 @@ pub fn generate_rand_block_and_data_columns<E: EthSpec>(
SignedBeaconBlock<E, FullPayload<E>>,
DataColumnSidecarList<E>,
) {
let kzg = get_kzg(spec);
let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec);
let blob_refs = blobs.iter().map(|b| &b.blob).collect::<Vec<_>>();
let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap();

let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec);
let data_columns = generate_data_column_sidecars_from_block(&block, spec);
(block, data_columns)
}

/// Generate data column sidecars from pre-computed cells and proofs.
/// Generate data column sidecars from pre-computed cells and proofs.
fn generate_data_column_sidecars_from_block<E: EthSpec>(
    block: &SignedBeaconBlock<E>,
    spec: &ChainSpec,
) -> DataColumnSidecarList<E> {
    let kzg_commitments = block.message().body().blob_kzg_commitments().unwrap();
    if kzg_commitments.is_empty() {
        return vec![];
    }

    let kzg_commitments_inclusion_proof = block
        .message()
        .body()
        .kzg_commitments_merkle_proof()
        .unwrap();
    let signed_block_header = block.signed_block_header();

    // Load the precomputed column sidecars to avoid computing them for every
    // block in the tests.
    let precomputed_columns = RuntimeVariableList::<DataColumnSidecar<E>>::from_ssz_bytes(
        TEST_DATA_COLUMN_SIDECARS_SSZ,
        spec.number_of_columns as usize,
    )
    .unwrap();

    let mut cells = Vec::new();
    let mut proofs = Vec::new();
    for sidecar in precomputed_columns.into_iter() {
        let DataColumnSidecar {
            column, kzg_proofs, ..
        } = sidecar;
        // There's only one cell per column for a single blob
        let cell_bytes: Vec<u8> = column.into_iter().next().unwrap().into();
        cells.push(cell_bytes.try_into().unwrap());
        proofs.push(kzg_proofs.into_iter().next().unwrap());
    }

    // Repeat the cells and proofs for every blob
    let blob_cells_and_proofs_vec =
        vec![(cells.try_into().unwrap(), proofs.try_into().unwrap()); kzg_commitments.len()];

    build_data_column_sidecars(
        kzg_commitments.clone(),
        kzg_commitments_inclusion_proof,
        signed_block_header,
        blob_cells_and_proofs_vec,
        spec,
    )
    .unwrap()
}
Binary file not shown.
Loading
Loading