Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 52 additions & 0 deletions fault-proof/src/challenger.rs
Original file line number Diff line number Diff line change
Expand Up @@ -545,6 +545,32 @@ where
};

for game in candidates {
// Pre-flight on-chain status check at `latest`. The cached `should_attempt_to_resolve`
// flag is captured at sync time and can be stale by submission — between sync and
// this loop, another actor's `resolve()` may have landed (or this loop already
// resolved an earlier candidate that affected this one). Re-checking at `latest`
// avoids submitting a resolution that would only revert on chain.
let contract = OPSuccinctFaultDisputeGame::new(game.address, self.l1_provider.clone());
match contract.status().call().await {
Ok(status) if status != GameStatus::IN_PROGRESS => {
tracing::info!(
game_index = %game.index,
game_address = ?game.address,
?status,
"Skipping resolve: game already resolved on chain"
);
continue;
}
Err(e) => {
tracing::warn!(
game_address = ?game.address,
error = ?e,
"Pre-flight status check failed, proceeding with resolve"
);
}
_ => {}
}

if let Err(error) = self.submit_resolution_transaction(&game).await {
if error.is_revert() {
tracing::error!(
Expand Down Expand Up @@ -611,7 +637,33 @@ where
.collect::<Vec<_>>()
};

let signer_address = self.signer.address();
for game in candidates {
// Pre-flight on-chain credit check at `latest`. The cached
// `should_attempt_to_claim_bond` flag is captured at sync time and can be stale by
// submission — a recently confirmed `claimCredit()` (e.g., from a prior cycle or
// another actor) is already reflected at `latest`. Re-checking avoids submitting a
// claim that would only revert on chain.
let contract = OPSuccinctFaultDisputeGame::new(game.address, self.l1_provider.clone());
match contract.credit(signer_address).call().await {
Ok(credit) if credit == U256::ZERO => {
tracing::info!(
game_index = %game.index,
game_address = ?game.address,
"Skipping claim: bond already claimed on chain"
);
continue;
}
Err(e) => {
tracing::warn!(
game_address = ?game.address,
error = ?e,
"Pre-flight credit check failed, proceeding with claim"
);
}
_ => {}
}

if let Err(error) = self.submit_bond_claim_transaction(&game).await {
if error.is_revert() {
tracing::error!(
Expand Down
130 changes: 125 additions & 5 deletions fault-proof/src/proposer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -654,13 +654,26 @@ where
latest_block.header.number.saturating_sub(self.config.sync_l1_confirmations);

// If L1 hasn't advanced past the last synced block, all on-chain state is identical.
//
// `confirmed_number < prev` indicates backend regression from a load-balanced RPC, or a
// deep L1 reorg past `sync_l1_confirmations`. This case should be logged at WARN so
// operators can detect unhealthy backends or L1 reorg; the equal case stays at DEBUG since
// it's the normal "L1 hasn't ticked" path.
let prev = self.last_synced_l1_block.load(Ordering::Relaxed);
if confirmed_number > 0 && confirmed_number <= prev {
tracing::debug!(
confirmed_number,
last_synced = prev,
"L1 head unchanged, skipping sync"
);
if confirmed_number < prev {
tracing::warn!(
confirmed_number,
last_synced = prev,
"L1 confirmed head moved backwards (backend regression or deep reorg), skipping sync"
);
} else {
tracing::debug!(
confirmed_number,
last_synced = prev,
"L1 head unchanged, skipping sync"
);
}
return Ok(());
}

Expand Down Expand Up @@ -734,6 +747,20 @@ where
.copied()
.collect();
if !future_games.is_empty() {
// Determine if the duplicate-creation guard's tracked game is among the
// entries this prune is about to remove. Must be evaluated BEFORE the
// removal loop while state.games still holds them. Checking "absent from
// post-prune cache" instead would over-clear the guard when the just-
// created game has not yet been added to the cache (e.g., right after
// creation, or after a backup restore that prunes unrelated entries),
// allowing should_create_game to re-submit a duplicate at the same L2
// block before the cache catches up.
let guarded_addr = *self.last_created_game_address.lock().await;
let guard_in_pruned = guarded_addr != Address::ZERO &&
future_games.iter().any(|idx| {
state.games.get(idx).is_some_and(|g| g.address == guarded_addr)
});

for idx in &future_games {
state.games.remove(idx);
}
Expand All @@ -747,6 +774,14 @@ where
if should_clear_anchor {
state.anchor_game = None;
}
if guard_in_pruned {
self.last_created_game_l2_block.store(0, Ordering::Relaxed);
*self.last_created_game_address.lock().await = Address::ZERO;
tracing::warn!(
?guarded_addr,
"Reset creation guard: tracked game was among pruned entries"
);
}
}
}

Expand Down Expand Up @@ -1378,6 +1413,31 @@ where
};

for game in candidates {
// Pre-flight on-chain status check at `latest`. The cached `should_attempt_to_resolve`
// is derived from the pinned (lagged) snapshot, so a recently confirmed `resolve()` tx
// may not yet be reflected. Querying at `latest` avoids re-submitting a resolution
// that would only revert on chain.
let contract = OPSuccinctFaultDisputeGame::new(game.address, self.l1_provider.clone());
match contract.status().call().await {
Ok(status) if status != GameStatus::IN_PROGRESS => {
tracing::info!(
game_index = %game.index,
game_address = ?game.address,
?status,
"Skipping resolve: game already resolved on chain"
);
continue;
}
Err(e) => {
tracing::warn!(
game_address = ?game.address,
error = ?e,
"Pre-flight status check failed, proceeding with resolve"
);
}
_ => {}
}

if let Err(error) = self.submit_resolution_transaction(&game).await {
if error.is_revert() {
tracing::error!(
Expand Down Expand Up @@ -1420,7 +1480,33 @@ where
.collect::<Vec<_>>()
};

let signer_address = self.signer.address();
for game in candidates {
// Pre-flight on-chain credit check at `latest`. The cached
// `should_attempt_to_claim_bond` is derived from the pinned (lagged)
// snapshot, so a recently confirmed `claimCredit()` tx may not yet be
// reflected. Querying at `latest` avoids re-submitting a claim that
// would only revert on chain.
let contract = OPSuccinctFaultDisputeGame::new(game.address, self.l1_provider.clone());
match contract.credit(signer_address).call().await {
Ok(credit) if credit == U256::ZERO => {
tracing::info!(
game_index = %game.index,
game_address = ?game.address,
"Skipping claim: bond already claimed on chain"
);
continue;
}
Err(e) => {
tracing::warn!(
game_address = ?game.address,
error = ?e,
"Pre-flight credit check failed, proceeding with claim"
);
}
_ => {}
}

if let Err(error) = self.submit_bond_claim_transaction(&game).await {
if error.is_revert() {
tracing::error!(
Expand Down Expand Up @@ -2336,6 +2422,7 @@ where
/// Returns `Ok(true)` if proving should be skipped:
/// - Game not found in cache
/// - Game not owned (vkeys don't match)
/// - Game is already proven or resolved on chain (pre-flight check at `latest`)
/// - Deadline has passed
///
/// Returns `Ok(false)` if proving should proceed.
Expand Down Expand Up @@ -2364,6 +2451,39 @@ where
}
}

// Pre-flight on-chain status check at `latest`. The cached `proposal_status` is read
// from the pinned (lagged) block, so a recently confirmed prove() or resolve() tx may
// not yet be reflected. Querying at `latest` avoids expensive proof regeneration that
// would only revert on submission. Skip when:
// - ProposalStatus is *ValidProofProvided (proof already submitted), or
// - ProposalStatus is Resolved (game concluded — set whenever GameStatus moves out of
// IN_PROGRESS, including timeout default-loss).
let contract = OPSuccinctFaultDisputeGame::new(game_address, self.l1_provider.clone());
match contract.claimData().call().await {
Ok(claim_data) => {
if matches!(
claim_data.status,
ProposalStatus::UnchallengedAndValidProofProvided |
ProposalStatus::ChallengedAndValidProofProvided |
ProposalStatus::Resolved
) {
tracing::info!(
?game_address,
proposal_status = ?claim_data.status,
"Skipping proving: game already proven or resolved on chain"
);
return Ok(true);
}
}
Err(e) => {
tracing::warn!(
?game_address,
error = ?e,
"Pre-flight proposal status check failed, proceeding with proving"
);
}
}

// Check deadline if provided
if let Some(deadline) = deadline {
let now = std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH)?.as_secs();
Expand Down
22 changes: 20 additions & 2 deletions scripts/utils/bin/cost_estimator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,22 @@ use op_succinct_host_utils::{
};
use op_succinct_proof_utils::{get_range_elf_embedded, initialize_host};
use op_succinct_scripts::HostExecutorArgs;

// Cost-estimator-specific CLI args. Wraps `HostExecutorArgs` and adds the estimator-only
// `--no-safe-head-split` flag so unrelated host binaries (e.g. `multi`,
// `gen-sp1-test-artifacts`) don't advertise a flag they ignore.
//
// NOTE(review): `#[command(flatten)]` merges every `HostExecutorArgs` option into this
// parser unchanged, so existing cost-estimator invocations keep working; the wrapper is
// purely additive.
#[derive(Debug, Clone, Parser)]
#[command(about = "Estimate OP Succinct execution costs over an L2 block range")]
struct CostEstimatorArgs {
// Shared host-executor options (env file, block range, batch size, etc.), flattened in
// so they appear as top-level flags of this binary.
#[command(flatten)]
host: HostExecutorArgs,
/// Bypass span-batch-aligned splitting even when SafeDB is active. Forces the basic
/// fixed-size splitter so the range is partitioned solely by `--batch-size`. Useful for
/// estimating per-segment cost as the proposer sees it (one zkVM execution per
/// `RANGE_SPLIT_COUNT` segment) rather than per span batch.
///
/// Defaults to `false` (clap bool flag): span-batch-aligned splitting remains the
/// default whenever SafeDB is activated, preserving prior behavior.
#[arg(long)]
no_safe_head_split: bool,
}
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use sp1_sdk::{
blocking::{CpuProver, Prover},
Expand Down Expand Up @@ -227,7 +243,9 @@ fn aggregate_execution_stats(

#[tokio::main]
async fn main() -> Result<()> {
let args = HostExecutorArgs::parse();
let args = CostEstimatorArgs::parse();
let no_safe_head_split = args.no_safe_head_split;
let args = args.host;

dotenv::from_path(&args.env_file).ok();
utils::setup_logger();
Expand Down Expand Up @@ -261,7 +279,7 @@ async fn main() -> Result<()> {
// splitting algorithm. Otherwise, we use the simple range splitting algorithm.
let safe_db_activated = data_fetcher.is_safe_db_activated().await?;

let split_ranges = if safe_db_activated {
let split_ranges = if safe_db_activated && !no_safe_head_split {
split_range_based_on_safe_heads(
&data_fetcher,
l2_start_block,
Expand Down
Loading