Add --semi-supernode support (#8254)

Addresses #8218

A simplified version of #8241 for the initial release.

I've tried to minimise the logic change in this PR, although introducing the `NodeCustodyType` enum still results in quite a bit of diff; the actual logic change in `CustodyContext` is quite small.

The main changes are in the `CustodyContext` struct:
* ~~combining `validator_custody_count` and `current_is_supernode` fields into a single `custody_group_count_at_head` field. We persist the cgc of the initial cli values into the `custody_group_count_at_head` field and only allow for increase (same behaviour as before).~~
* I noticed the above approach caused a backward compatibility issue, I've [made a fix](15569bc085) and changed the approach slightly (which was actually what I had originally in mind):
* when initialising, only override the `validator_custody_count` value if either the `--supernode` or `--semi-supernode` flag is used; otherwise leave it as the existing default `0`. Most other logic remains unchanged.

All existing validator custody unit tests are all still passing, and I've added additional tests to cover semi-supernode and restoring `CustodyContext` from disk.

Note: I've added a `WARN` if the user attempts to switch to `--semi-supernode` or `--supernode` — this currently has no effect, but once @eserilev's column backfill is merged, we should be able to support this quite easily.

Things to test
- [x] cgc in metadata / enr
- [x] cgc in metrics
- [x] subscribed subnets
- [x] getBlobs endpoint


  


Co-Authored-By: Jimmy Chen <jchen.tc@gmail.com>
This commit is contained in:
Jimmy Chen
2025-10-22 16:23:17 +11:00
committed by GitHub
parent 33e21634cb
commit 43c5e924d7
21 changed files with 420 additions and 114 deletions

View File

@@ -4,6 +4,7 @@ use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
use beacon_chain::data_column_verification::CustodyDataColumn;
use beacon_chain::{
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
custody_context::NodeCustodyType,
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec,
},
@@ -45,7 +46,7 @@ async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<DataSidecars
// The assumption that you can re-import a block based on what you have in your DB
// is no longer true, as fullnodes stores less than what they sample.
// We use a supernode here to build a chain segment.
let harness = get_harness(VALIDATOR_COUNT, true);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Supernode);
harness
.extend_chain(
@@ -106,7 +107,7 @@ async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<DataSidecars
fn get_harness(
validator_count: usize,
supernode: bool,
node_custody_type: NodeCustodyType,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.default_spec()
@@ -115,7 +116,7 @@ fn get_harness(
..ChainConfig::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())
.import_all_data_columns(supernode)
.node_custody_type(node_custody_type)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
@@ -259,7 +260,7 @@ fn update_data_column_signed_header<E: EthSpec>(
#[tokio::test]
async fn chain_segment_full_segment() {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
@@ -297,7 +298,7 @@ async fn chain_segment_full_segment() {
#[tokio::test]
async fn chain_segment_varying_chunk_size() {
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
@@ -329,7 +330,7 @@ async fn chain_segment_varying_chunk_size() {
#[tokio::test]
async fn chain_segment_non_linear_parent_roots() {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
harness
@@ -386,7 +387,7 @@ async fn chain_segment_non_linear_parent_roots() {
#[tokio::test]
async fn chain_segment_non_linear_slots() {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
harness
.chain
@@ -528,7 +529,7 @@ async fn assert_invalid_signature(
async fn get_invalid_sigs_harness(
chain_segment: &[BeaconSnapshot<E>],
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
harness
.chain
.slot_clock
@@ -986,7 +987,7 @@ fn unwrap_err<T, U>(result: Result<T, U>) -> U {
#[tokio::test]
async fn block_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let block_index = CHAIN_SEGMENT_LENGTH - 2;
@@ -1389,7 +1390,7 @@ async fn verify_block_for_gossip_slashing_detection() {
#[tokio::test]
async fn verify_block_for_gossip_doppelganger_detection() {
let harness = get_harness(VALIDATOR_COUNT, false);
let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode);
let state = harness.get_current_state();
let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await;

View File

@@ -1,5 +1,6 @@
#![cfg(not(debug_assertions))]
use beacon_chain::custody_context::NodeCustodyType;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
generate_data_column_sidecars_from_block, test_spec,
@@ -24,7 +25,7 @@ static KEYPAIRS: LazyLock<Vec<Keypair>> =
fn get_harness(
validator_count: usize,
spec: Arc<ChainSpec>,
supernode: bool,
node_custody_type: NodeCustodyType,
) -> BeaconChainHarness<EphemeralHarnessType<E>> {
create_test_tracing_subscriber();
let harness = BeaconChainHarness::builder(MainnetEthSpec)
@@ -34,7 +35,7 @@ fn get_harness(
..ChainConfig::default()
})
.keypairs(KEYPAIRS[0..validator_count].to_vec())
.import_all_data_columns(supernode)
.node_custody_type(node_custody_type)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
@@ -54,8 +55,7 @@ async fn rpc_columns_with_invalid_header_signature() {
return;
}
let supernode = true;
let harness = get_harness(VALIDATOR_COUNT, spec, supernode);
let harness = get_harness(VALIDATOR_COUNT, spec, NodeCustodyType::Supernode);
let num_blocks = E::slots_per_epoch() as usize;

View File

@@ -3,6 +3,7 @@
use beacon_chain::attestation_verification::Error as AttnError;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::builder::BeaconChainBuilder;
use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS;
use beacon_chain::data_availability_checker::AvailableBlock;
use beacon_chain::historical_data_columns::HistoricalDataColumnError;
use beacon_chain::schema_change::migrate_schema;
@@ -11,13 +12,13 @@ use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg,
mock_execution_layer_from_parts, test_spec,
};
use beacon_chain::validator_custody::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS;
use beacon_chain::{
BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig,
NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
beacon_proposer_cache::{
compute_proposer_duties_from_head, ensure_state_can_determine_proposers_for_epoch,
},
custody_context::NodeCustodyType,
data_availability_checker::MaybeAvailableBlock,
historical_blocks::HistoricalBlockError,
migrate::MigratorConfig,
@@ -98,7 +99,12 @@ fn get_harness(
reconstruct_historic_states: true,
..ChainConfig::default()
};
get_harness_generic(store, validator_count, chain_config, false)
get_harness_generic(
store,
validator_count,
chain_config,
NodeCustodyType::Fullnode,
)
}
fn get_harness_import_all_data_columns(
@@ -110,14 +116,19 @@ fn get_harness_import_all_data_columns(
reconstruct_historic_states: true,
..ChainConfig::default()
};
get_harness_generic(store, validator_count, chain_config, true)
get_harness_generic(
store,
validator_count,
chain_config,
NodeCustodyType::Supernode,
)
}
fn get_harness_generic(
store: Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>,
validator_count: usize,
chain_config: ChainConfig,
import_all_data_columns: bool,
node_custody_type: NodeCustodyType,
) -> TestHarness {
let harness = TestHarness::builder(MinimalEthSpec)
.spec(store.get_chain_spec().clone())
@@ -125,7 +136,7 @@ fn get_harness_generic(
.fresh_disk_store(store)
.mock_execution_layer()
.chain_config(chain_config)
.import_all_data_columns(import_all_data_columns)
.node_custody_type(node_custody_type)
.build();
harness.advance_slot();
harness
@@ -3420,7 +3431,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
reconstruct_historic_states: false,
..ChainConfig::default()
};
let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config, false);
let harness = get_harness_generic(
store.clone(),
LOW_VALIDATOR_COUNT,
chain_config,
NodeCustodyType::Fullnode,
);
let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
@@ -3839,14 +3855,13 @@ async fn schema_downgrade_to_min_version(
reconstruct_historic_states,
..ChainConfig::default()
};
let import_all_data_columns = false;
let store = get_store_generic(&db_path, store_config.clone(), spec.clone());
let harness = get_harness_generic(
store.clone(),
LOW_VALIDATOR_COUNT,
chain_config.clone(),
import_all_data_columns,
NodeCustodyType::Fullnode,
);
harness
@@ -4862,14 +4877,13 @@ async fn ancestor_state_root_prior_to_split() {
reconstruct_historic_states: false,
..ChainConfig::default()
};
let import_all_data_columns = false;
let store = get_store_generic(&db_path, store_config, spec);
let harness = get_harness_generic(
store.clone(),
LOW_VALIDATOR_COUNT,
chain_config,
import_all_data_columns,
NodeCustodyType::Fullnode,
);
// Produce blocks until we have passed through two full snapshot periods. This period length is
@@ -4956,14 +4970,13 @@ async fn replay_from_split_state() {
reconstruct_historic_states: false,
..ChainConfig::default()
};
let import_all_data_columns = false;
let store = get_store_generic(&db_path, store_config.clone(), spec.clone());
let harness = get_harness_generic(
store.clone(),
LOW_VALIDATOR_COUNT,
chain_config,
import_all_data_columns,
NodeCustodyType::Fullnode,
);
// Produce blocks until we finalize epoch 3 which will not be stored as a snapshot.