2 changes: 2 additions & 0 deletions Cargo.lock

Generated file; diff not rendered.

4 changes: 4 additions & 0 deletions crates/builder/src/lib.rs
@@ -141,6 +141,10 @@ pub mod testing {
start_voting_time: 0,
stop_proposing_time: 0,
stop_voting_time: 0,
upgrade_propose_offset: None,
upgrade_decide_by_offset: None,
upgrade_begin_offset: None,
upgrade_finish_offset: None,
epoch_height: 0,
epoch_start_block: 0,
stake_table_capacity: hotshot_types::light_client::DEFAULT_STAKE_TABLE_CAPACITY,
61 changes: 59 additions & 2 deletions crates/espresso/node/src/consensus_handle.rs
@@ -15,6 +15,10 @@ use hotshot_new_protocol::{
client::ClientApi,
consensus::ConsensusOutput,
coordinator::{Coordinator, CoordinatorOutput, error::Severity},
harvest::{
LegacyPreCutoverSeed, forward_legacy_timeout_votes, harvest_legacy_pre_cutover_seed,
try_perform_handover,
},
network::Network,
state::UpdateLeaf,
storage::NewProtocolStorage,
@@ -32,6 +36,24 @@ use tokio::spawn;
use tokio_util::task::AbortOnDropHandle;
use versions::version;

/// Status of the legacy → 0.8 protocol cutover.
#[derive(Clone, Debug)]
pub enum CutoverStatus {
/// No upgrade certificate has been decided yet — the network is running
/// purely on the legacy protocol.
NotConfigured,
/// The cutover view is in the future. `views_remaining` is how many views
/// before the new protocol takes over. Operators should ensure Cliquenet
/// peer connectivity is established by the time this hits 0.
Approaching {
cur_view: ViewNumber,
cutover_view: ViewNumber,
views_remaining: u64,
},
/// The new protocol is active.
Active { cutover_view: ViewNumber },
}

fn consensus_event<T: NodeType>(output: &ConsensusOutput<T>) -> Option<CoordinatorEvent<T>> {
match output {
ConsensusOutput::LeafDecided {
@@ -112,6 +134,15 @@
let coordinator_task =
AbortOnDropHandle::new(spawn(run_coordinator(coordinator, event_tx)));

// Forward `LegacyTimeoutVoteEmitted` events from the legacy task into
// the new-protocol coordinator's timeout collectors. This is how the
// first 0.8 leader gets a `TimeoutCertificate2` for the boundary
// view if 0.4 timed out before its QC formed.
spawn(forward_legacy_timeout_votes(
legacy_event_rx.clone(),
client_api.clone(),
));

Self {
legacy_handle,
client_api,
@@ -123,6 +154,11 @@
}
}

pub async fn harvest_legacy_pre_cutover_seed(&self) -> Option<LegacyPreCutoverSeed<T>> {
let legacy = self.legacy_handle.read().await;
harvest_legacy_pre_cutover_seed(&legacy).await
}

pub fn legacy_consensus(&self) -> Arc<RwLock<SystemContextHandle<T, I>>> {
self.legacy_handle.clone()
}
@@ -141,12 +177,33 @@
>= version(0, 8)
}

/// Status of the legacy → 0.8 cutover relative to the current view.
/// Intended for operator monitoring around the upgrade boundary.
pub async fn cutover_status(&self) -> CutoverStatus {
let legacy = self.legacy_handle.read().await;
let cur_view = legacy.cur_view().await;
let lock = &legacy.hotshot.upgrade_lock;
let Some(cert) = lock.decided_upgrade_cert() else {
return CutoverStatus::NotConfigured;
};
let cutover_view = cert.data.new_version_first_view;
if cur_view >= cutover_view {
CutoverStatus::Active { cutover_view }
} else {
CutoverStatus::Approaching {
cur_view,
cutover_view,
views_remaining: *cutover_view - *cur_view,
}
}
}

async fn new_protocol(&self) -> bool {
if self.new_protocol_active.load(Ordering::Relaxed) {
return true;
}
-let view = self.legacy_handle.read().await.cur_view().await;
-let active = self.new_protocol_at(view).await;
let legacy = self.legacy_handle.read().await;
let active = try_perform_handover(&legacy, &self.client_api).await;
if active {
self.new_protocol_active.store(true, Ordering::Relaxed);
}
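
Review note (not part of this PR): a minimal sketch of an operator-side consumer of the new `cutover_status` API. The handle type name `ConsensusHandle`, the poll cadence, and the log lines are assumptions; only `cutover_status` and the `CutoverStatus` variants come from this diff.

use std::time::Duration;

use hotshot_types::traits::node_implementation::{NodeImplementation, NodeType};

// Hypothetical monitoring loop; `ConsensusHandle` stands in for the
// handle type whose methods this file extends.
async fn monitor_cutover<T: NodeType, I: NodeImplementation<T>>(
    handle: &ConsensusHandle<T, I>,
) {
    loop {
        match handle.cutover_status().await {
            CutoverStatus::NotConfigured => {
                tracing::debug!("no upgrade certificate decided yet");
            }
            CutoverStatus::Approaching { cur_view, cutover_view, views_remaining } => {
                tracing::info!(?cur_view, ?cutover_view, views_remaining, "cutover approaching");
            }
            CutoverStatus::Active { cutover_view } => {
                tracing::info!(?cutover_view, "new protocol active");
                break;
            }
        }
        tokio::time::sleep(Duration::from_secs(5)).await;
    }
}
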
4 changes: 4 additions & 0 deletions crates/espresso/node/src/lib.rs
@@ -1234,6 +1234,10 @@ pub mod testing {
start_voting_time: 0,
stop_proposing_time: 0,
stop_voting_time: 0,
upgrade_propose_offset: None,
upgrade_decide_by_offset: None,
upgrade_begin_offset: None,
upgrade_finish_offset: None,
epoch_height: 30,
epoch_start_block: 1,
stake_table_capacity: hotshot_types::light_client::DEFAULT_STAKE_TABLE_CAPACITY,
20 changes: 20 additions & 0 deletions crates/espresso/types/src/v0/config.rs
@@ -81,6 +81,14 @@ pub struct PublicHotShotConfig {
stop_proposing_time: u64,
start_voting_time: u64,
stop_voting_time: u64,
#[serde(default)]
upgrade_propose_offset: Option<u64>,
#[serde(default)]
upgrade_decide_by_offset: Option<u64>,
#[serde(default)]
upgrade_begin_offset: Option<u64>,
#[serde(default)]
upgrade_finish_offset: Option<u64>,
epoch_height: u64,
epoch_start_block: u64,
#[serde(default = "default_stake_table_capacity")]
@@ -132,6 +140,10 @@ impl From<HotShotConfig<SeqTypes>> for PublicHotShotConfig {
stop_proposing_time,
start_voting_time,
stop_voting_time,
upgrade_propose_offset,
upgrade_decide_by_offset,
upgrade_begin_offset,
upgrade_finish_offset,
epoch_height,
epoch_start_block,
stake_table_capacity,
@@ -161,6 +173,10 @@
stop_proposing_time,
start_voting_time,
stop_voting_time,
upgrade_propose_offset,
upgrade_decide_by_offset,
upgrade_begin_offset,
upgrade_finish_offset,
epoch_height,
epoch_start_block,
stake_table_capacity,
@@ -194,6 +210,10 @@ impl PublicHotShotConfig {
stop_proposing_time: self.stop_proposing_time,
start_voting_time: self.start_voting_time,
stop_voting_time: self.stop_voting_time,
upgrade_propose_offset: self.upgrade_propose_offset,
upgrade_decide_by_offset: self.upgrade_decide_by_offset,
upgrade_begin_offset: self.upgrade_begin_offset,
upgrade_finish_offset: self.upgrade_finish_offset,
epoch_height: self.epoch_height,
epoch_start_block: self.epoch_start_block,
stake_table_capacity: self.stake_table_capacity,
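
Review note (not part of this PR): a standalone toy showing the behavior the `#[serde(default)]` / `Option<u64>` combination gives the new fields — a missing key deserializes to `None`, so pre-upgrade config files keep parsing unchanged. Toy struct only, not the real `PublicHotShotConfig`.

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct UpgradeOffsets {
    #[serde(default)]
    upgrade_propose_offset: Option<u64>,
    #[serde(default)]
    upgrade_decide_by_offset: Option<u64>,
}

fn main() {
    // A pre-upgrade config with no offset keys still parses; fields are None.
    let old: UpgradeOffsets = serde_json::from_str("{}").unwrap();
    assert_eq!(old.upgrade_propose_offset, None);

    // A new config can opt in per field.
    let new: UpgradeOffsets =
        serde_json::from_str(r#"{"upgrade_propose_offset": 10}"#).unwrap();
    assert_eq!(new.upgrade_propose_offset, Some(10));
}
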
4 changes: 4 additions & 0 deletions crates/hotshot/hotshot/src/tasks/task_state.rs
@@ -86,6 +86,10 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>> CreateTaskState<TYPES, I>
stop_proposing_time: handle.hotshot.config.stop_proposing_time,
start_voting_time: handle.hotshot.config.start_voting_time,
stop_voting_time: handle.hotshot.config.stop_voting_time,
upgrade_propose_offset: handle.hotshot.config.upgrade_propose_offset,
upgrade_decide_by_offset: handle.hotshot.config.upgrade_decide_by_offset,
upgrade_begin_offset: handle.hotshot.config.upgrade_begin_offset,
upgrade_finish_offset: handle.hotshot.config.upgrade_finish_offset,
epoch_start_block: handle.hotshot.config.epoch_start_block,
upgrade_lock: handle.hotshot.upgrade_lock.clone(),
epoch_height: handle.epoch_height,
2 changes: 2 additions & 0 deletions crates/hotshot/new-protocol/Cargo.toml
@@ -35,6 +35,8 @@ time = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
url = { workspace = true }
vec1 = { workspace = true }
versions = { workspace = true }

[lints]
77 changes: 76 additions & 1 deletion crates/hotshot/new-protocol/src/client.rs
@@ -1,10 +1,12 @@
-use std::{num::NonZeroUsize, sync::Arc};
use std::{collections::BTreeMap, num::NonZeroUsize, sync::Arc};

use async_trait::async_trait;
use committable::Commitment;
use hotshot_types::{
data::{EpochNumber, Leaf2, ViewNumber},
message::Proposal as SignedProposal,
simple_certificate::QuorumCertificate2,
simple_vote::TimeoutVote2,
traits::{leaf_fetcher_network::LeafFetcherNetwork, node_implementation::NodeType},
utils::StateAndDelta,
};
@@ -112,6 +114,66 @@ impl<T: NodeType> ClientApi<T> {
.await?
}

/// Forward a `TimeoutVote2` produced by the legacy (pre-0.8) consensus
/// task into the new-protocol coordinator's timeout collectors. Used at
/// the legacy → new-protocol boundary: when a legacy view near the
/// cutover times out, the legacy task signs a `TimeoutVote2` (whose
/// commitment is version-tagged via the shared `UpgradeLock`) and
/// submits it here so the first 0.8 leader can collect a
/// `TimeoutCertificate2` for that pre-cutover view.
///
/// `TimeoutVote2` is structurally identical between 0.4 and 0.8
/// (`SimpleVote<TYPES, TimeoutData2>`) so the same vote feeds both
/// systems' aggregators without re-signing.
pub async fn submit_timeout_vote(&self, vote: TimeoutVote2<T>) -> Result<(), QueryError> {
let (respond, rx) = oneshot::channel();
self.call(ClientRequest::SubmitTimeoutVote { vote, respond }, rx)
.await
}
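
Review note (not part of this PR): the forwarding glue referenced in consensus_handle.rs lives in the `harvest` module and isn't shown in this diff; roughly, it is a loop of the following shape. The `LegacyEvent` enum and its variant name are assumptions reconstructed from the "LegacyTimeoutVoteEmitted" comment; only `submit_timeout_vote` is defined in this file.

use futures::{Stream, StreamExt};
use hotshot_types::{simple_vote::TimeoutVote2, traits::node_implementation::NodeType};

// Hypothetical stand-in for the legacy task's event type.
enum LegacyEvent<T: NodeType> {
    TimeoutVoteEmitted(TimeoutVote2<T>),
    Other,
}

async fn forward_legacy_timeout_votes_sketch<T: NodeType>(
    mut legacy_events: impl Stream<Item = LegacyEvent<T>> + Unpin,
    client: ClientApi<T>,
) {
    while let Some(event) = legacy_events.next().await {
        if let LegacyEvent::TimeoutVoteEmitted(vote) = event {
            // Same `TimeoutVote2` type on both sides of the cutover,
            // so the vote is forwarded as-is, without re-signing.
            if let Err(e) = client.submit_timeout_vote(vote).await {
                tracing::warn!("failed to forward legacy timeout vote: {e}");
            }
        }
    }
}
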

/// Bridge legacy (pre-0.8) state into the running coordinator at the
/// legacy → new-protocol cutover.
///
/// - `decided_anchor` is the highest leaf 0.4 had decided.
/// - `undecided` is the chain of undecided 0.4 leaves above the anchor
/// (oldest-first).
/// - `high_qc` is the QC of the topmost undecided leaf, if 0.4 voting
/// completed enough for that QC to form. Required for the first 0.8
/// leader to find `certs[N-1]` when proposing at view N (= the
/// topmost leaf's view + 1). May be `None` if the chain stalled
/// before the topmost leaf got a QC; in that case the first 0.8
/// leader will need view-change evidence.
/// - `validated_states` is the validated state of every seeded leaf
/// (anchor + undecided), keyed by view number. The new protocol
/// pipelines header creation and state validation against the
/// parent's stored state — without seeding these, the first
/// post-cutover leader cannot build a header (no parent state) and
/// peers cannot validate the first post-cutover proposal.
///
/// Idempotent at the consensus level: `set_pre_cutover_anchor` no-ops if
/// the supplied view is not above the current `last_decided_view`, and
/// `seed_pre_cutover_leaves` reinserts views that are already in the set.
pub async fn seed_pre_cutover(
&self,
decided_anchor: Leaf2<T>,
undecided: Vec<Leaf2<T>>,
high_qc: Option<QuorumCertificate2<T>>,
validated_states: BTreeMap<ViewNumber, Arc<T::ValidatedState>>,
) -> Result<(), QueryError> {
let (respond, rx) = oneshot::channel();
self.call(
ClientRequest::SeedPreCutover {
decided_anchor,
undecided,
high_qc,
validated_states,
respond,
},
rx,
)
.await
}
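
Review note (not part of this PR): how the handover path presumably stitches harvesting and seeding together. The field names on `LegacyPreCutoverSeed` are guesses matching the parameter list above; only `seed_pre_cutover` is actually defined in this diff.

use hotshot_types::traits::node_implementation::NodeType;

// Hypothetical: assumes `LegacyPreCutoverSeed` carries exactly the four
// pieces `seed_pre_cutover` consumes, under these field names.
async fn hand_over<T: NodeType>(
    seed: LegacyPreCutoverSeed<T>,
    client: &ClientApi<T>,
) -> Result<(), QueryError> {
    client
        .seed_pre_cutover(
            seed.decided_anchor,   // highest leaf 0.4 decided
            seed.undecided,        // oldest-first chain above the anchor
            seed.high_qc,          // None if 0.4 stalled before the QC formed
            seed.validated_states, // per-view states for header building/validation
        )
        .await
}
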

async fn call<A>(
&self,
request: ClientRequest<T>,
@@ -192,6 +254,19 @@ pub(crate) enum ClientRequest<T: NodeType> {
recipient: T::SignatureKey,
respond: oneshot::Sender<Result<(), QueryError>>,
},
SeedPreCutover {
decided_anchor: Leaf2<T>,
undecided: Vec<Leaf2<T>>,
high_qc: Option<QuorumCertificate2<T>>,
/// Validated state for each seeded leaf, keyed by view. Empty if
/// the caller has no states to seed (e.g. legacy-only test paths).
validated_states: BTreeMap<ViewNumber, Arc<T::ValidatedState>>,
respond: oneshot::Sender<()>,
},
SubmitTimeoutVote {
vote: TimeoutVote2<T>,
respond: oneshot::Sender<()>,
},
}

#[derive(Debug, thiserror::Error)]