Initial work to remove engines fallback from the execution_layer crate (#3257)

## Issue Addressed

Part of #3160 

## Proposed Changes
Use only the first URL given for the execution engine; if more than one is provided, log a warning.
This change only moves from having multiple engines to one. The amount of code cleanup that can and should be done going forward is not small and would interfere with ongoing PRs, so I'm intentionally keeping the changes very minimal.

## Additional Info

Future works:
- In [ `EngineError` ](9c429d0764/beacon_node/execution_layer/src/engines.rs (L173-L177)) the id is not needed since it now has no meaning.
- The [ `first_success_without_retry` ](9c429d0764/beacon_node/execution_layer/src/engines.rs (L348-L351)) function can return a single error.
- The [`first_success`](9c429d0764/beacon_node/execution_layer/src/engines.rs (L324)) function can return a single error.
- After the redundancy is removed for the builders, we can probably make the [ `EngineErrors` ](9c429d0764/beacon_node/execution_layer/src/lib.rs (L69)) carry a single error.
- Merge the [`Engines`](9c429d0764/beacon_node/execution_layer/src/engines.rs (L161-L165)) struct and [`Engine` ](9c429d0764/beacon_node/execution_layer/src/engines.rs (L62-L67))
- Fix the associated configurations and CLI params. Not sure if both are done in https://github.com/sigp/lighthouse/pull/3214

In general I think those changes can be done incrementally and in individual pull requests.
This commit is contained in:
Divma
2022-06-22 14:27:16 +00:00
parent 8faaa35b58
commit 2063c0fa0d
2 changed files with 201 additions and 285 deletions

View File

@@ -158,72 +158,60 @@ impl ExecutionLayer {
let Config {
execution_endpoints: urls,
builder_endpoints: builder_urls,
mut secret_files,
secret_files,
suggested_fee_recipient,
jwt_id,
jwt_version,
default_datadir,
} = config;
if urls.is_empty() {
return Err(Error::NoEngines);
if urls.len() > 1 {
warn!(log, "Only the first execution engine url will be used");
}
let execution_url = urls.into_iter().next().ok_or(Error::NoEngines)?;
// Extend the jwt secret files with the default jwt secret path if not provided via cli.
// This ensures that we have a jwt secret for every EL.
secret_files.extend(vec![
default_datadir.join(DEFAULT_JWT_FILE);
urls.len().saturating_sub(secret_files.len())
]);
let secrets: Vec<(JwtKey, PathBuf)> = secret_files
.iter()
.map(|p| {
// Read secret from file if it already exists
if p.exists() {
std::fs::read_to_string(p)
.map_err(|e| {
format!("Failed to read JWT secret file {:?}, error: {:?}", p, e)
})
.and_then(|ref s| {
let secret = JwtKey::from_slice(
&hex::decode(strip_prefix(s.trim_end()))
.map_err(|e| format!("Invalid hex string: {:?}", e))?,
)?;
Ok((secret, p.to_path_buf()))
})
} else {
// Create a new file and write a randomly generated secret to it if file does not exist
std::fs::File::options()
.write(true)
.create_new(true)
.open(p)
.map_err(|e| {
format!("Failed to open JWT secret file {:?}, error: {:?}", p, e)
})
.and_then(|mut f| {
let secret = auth::JwtKey::random();
f.write_all(secret.hex_string().as_bytes()).map_err(|e| {
format!("Failed to write to JWT secret file: {:?}", e)
})?;
Ok((secret, p.to_path_buf()))
})
}
})
.collect::<Result<_, _>>()
.map_err(Error::InvalidJWTSecret)?;
let engines: Vec<Engine<EngineApi>> = urls
// Use the default jwt secret path if not provided via cli.
let secret_file = secret_files
.into_iter()
.zip(secrets.into_iter())
.map(|(url, (secret, path))| {
let id = url.to_string();
let auth = Auth::new(secret, jwt_id.clone(), jwt_version.clone());
debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?path);
let api = HttpJsonRpc::<EngineApi>::new_with_auth(url, auth)?;
Ok(Engine::<EngineApi>::new(id, api))
})
.collect::<Result<_, ApiError>>()?;
.next()
.unwrap_or_else(|| default_datadir.join(DEFAULT_JWT_FILE));
let jwt_key = if secret_file.exists() {
// Read secret from file if it already exists
std::fs::read_to_string(&secret_file)
.map_err(|e| format!("Failed to read JWT secret file. Error: {:?}", e))
.and_then(|ref s| {
let secret = JwtKey::from_slice(
&hex::decode(strip_prefix(s.trim_end()))
.map_err(|e| format!("Invalid hex string: {:?}", e))?,
)?;
Ok(secret)
})
.map_err(Error::InvalidJWTSecret)
} else {
// Create a new file and write a randomly generated secret to it if file does not exist
std::fs::File::options()
.write(true)
.create_new(true)
.open(&secret_file)
.map_err(|e| format!("Failed to open JWT secret file. Error: {:?}", e))
.and_then(|mut f| {
let secret = auth::JwtKey::random();
f.write_all(secret.hex_string().as_bytes())
.map_err(|e| format!("Failed to write to JWT secret file: {:?}", e))?;
Ok(secret)
})
.map_err(Error::InvalidJWTSecret)
}?;
let engine: Engine<EngineApi> = {
let id = execution_url.to_string();
let auth = Auth::new(jwt_key, jwt_id, jwt_version);
debug!(log, "Loaded execution endpoint"; "endpoint" => %id, "jwt_path" => ?secret_file.as_path());
let api = HttpJsonRpc::<EngineApi>::new_with_auth(execution_url, auth)
.map_err(Error::ApiError)?;
Engine::<EngineApi>::new(id, api)
};
let builders: Vec<Engine<BuilderApi>> = builder_urls
.into_iter()
@@ -236,7 +224,7 @@ impl ExecutionLayer {
let inner = Inner {
engines: Engines {
engines,
engine,
latest_forkchoice_state: <_>::default(),
log: log.clone(),
},
@@ -455,7 +443,7 @@ impl ExecutionLayer {
/// Returns `true` if there is at least one synced and reachable engine.
pub async fn is_synced(&self) -> bool {
self.engines().any_synced().await
self.engines().is_synced().await
}
/// Updates the proposer preparation data provided by validators
@@ -750,7 +738,7 @@ impl ExecutionLayer {
process_multiple_payload_statuses(
execution_payload.block_hash,
broadcast_results.into_iter(),
Some(broadcast_results).into_iter(),
self.log(),
)
}
@@ -903,7 +891,7 @@ impl ExecutionLayer {
};
process_multiple_payload_statuses(
head_block_hash,
broadcast_results
Some(broadcast_results)
.into_iter()
.chain(builder_broadcast_results.into_iter())
.map(|result| result.map(|response| response.payload_status)),
@@ -918,49 +906,49 @@ impl ExecutionLayer {
terminal_block_number: 0,
};
let broadcast_results = self
let broadcast_result = self
.engines()
.broadcast(|engine| engine.api.exchange_transition_configuration_v1(local))
.await;
let mut errors = vec![];
for (i, result) in broadcast_results.into_iter().enumerate() {
match result {
Ok(remote) => {
if local.terminal_total_difficulty != remote.terminal_total_difficulty
|| local.terminal_block_hash != remote.terminal_block_hash
{
error!(
self.log(),
"Execution client config mismatch";
"msg" => "ensure lighthouse and the execution client are up-to-date and \
configured consistently",
"execution_endpoint" => i,
"remote" => ?remote,
"local" => ?local,
);
errors.push(EngineError::Api {
id: i.to_string(),
error: ApiError::TransitionConfigurationMismatch,
});
} else {
debug!(
self.log(),
"Execution client config is OK";
"execution_endpoint" => i
);
}
}
Err(e) => {
// Having no fallbacks, the id of the used node is 0
let i = 0usize;
match broadcast_result {
Ok(remote) => {
if local.terminal_total_difficulty != remote.terminal_total_difficulty
|| local.terminal_block_hash != remote.terminal_block_hash
{
error!(
self.log(),
"Unable to get transition config";
"error" => ?e,
"Execution client config mismatch";
"msg" => "ensure lighthouse and the execution client are up-to-date and \
configured consistently",
"execution_endpoint" => i,
"remote" => ?remote,
"local" => ?local,
);
errors.push(EngineError::Api {
id: i.to_string(),
error: ApiError::TransitionConfigurationMismatch,
});
} else {
debug!(
self.log(),
"Execution client config is OK";
"execution_endpoint" => i
);
errors.push(e);
}
}
Err(e) => {
error!(
self.log(),
"Unable to get transition config";
"error" => ?e,
"execution_endpoint" => i,
);
errors.push(e);
}
}
if errors.is_empty() {
@@ -1102,8 +1090,7 @@ impl ExecutionLayer {
&[metrics::IS_VALID_TERMINAL_POW_BLOCK_HASH],
);
let broadcast_results = self
.engines()
self.engines()
.broadcast(|engine| async move {
if let Some(pow_block) = self.get_pow_block(engine, block_hash).await? {
if let Some(pow_parent) =
@@ -1116,38 +1103,8 @@ impl ExecutionLayer {
}
Ok(None)
})
.await;
let mut errors = vec![];
let mut terminal = 0;
let mut not_terminal = 0;
let mut block_missing = 0;
for result in broadcast_results {
match result {
Ok(Some(true)) => terminal += 1,
Ok(Some(false)) => not_terminal += 1,
Ok(None) => block_missing += 1,
Err(e) => errors.push(e),
}
}
if terminal > 0 && not_terminal > 0 {
crit!(
self.log(),
"Consensus failure between execution nodes";
"method" => "is_valid_terminal_pow_block_hash"
);
}
if terminal > 0 {
Ok(Some(true))
} else if not_terminal > 0 {
Ok(Some(false))
} else if block_missing > 0 {
Ok(None)
} else {
Err(Error::EngineErrors(errors))
}
.await
.map_err(|e| Error::EngineErrors(vec![e]))
}
/// This function should remain internal.